Using Seata for Distributed Transactions

1. Version Notes

https://github.com/alibaba/spring-cloud-alibaba/wiki/%E7%89%88%E6%9C%AC%E8%AF%B4%E6%98%8E

2. Create the Seata Server Database (for the TC, the transaction coordinator)

The server-side table scripts (global_table, branch_table, lock_table, distributed_lock) are here: https://github.com/apache/incubator-seata/tree/v1.6.1/script/server/db

3. Create the undo_log Table in Each Business Database

https://github.com/apache/incubator-seata/blob/v1.6.1/script/client/at/db/mysql.sql

CREATE TABLE `undo_log` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT,
  `branch_id` bigint(20) NOT NULL,
  `xid` varchar(100) NOT NULL,
  `context` varchar(128) NOT NULL,
  `rollback_info` longblob NOT NULL,
  `log_status` int(11) NOT NULL,
  `log_created` datetime NOT NULL,
  `log_modified` datetime NOT NULL,
  `ext` varchar(100) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

4. Install Seata Server

4.1 Write the docker-compose.yml File

Reference: the "Docker Compose Deployment" page of the Apache Seata documentation.

First copy the default configuration files out of the container with docker cp, then write docker-compose.yml:

version: "3.1"
services:
  seata-server:
    image: seataio/seata-server:1.6.1
    ports:
      - "7091:7091"
      - "8091:8091"
    environment:
      - STORE_MODE=db
      # register the seata server using SEATA_IP as the host
      - SEATA_IP=192.168.11.99
      - SEATA_PORT=8091
    volumes:
      - "/usr/share/zoneinfo/Asia/Shanghai:/etc/localtime"        #设置系统时区
      - "/usr/share/zoneinfo/Asia/Shanghai:/etc/timezone"  #设置时区
      # 假设我们通过docker cp命令把资源文件拷贝到相对路径`./seata-server/resources`中
      # 如有问题,请阅读上面的[注意事项]以及[使用自定义配置文件]
      - "./seata-server/resources:/seata-server/resources"

4.2 Modify the application.yml File

Copy the default resources out of the running container first (`9bb` is the container ID prefix):

docker cp 9bb:/seata-server/resources /opt/seata/seata-server/

Then edit application.yml in the copied resources directory:

server:
  port: 7091

spring:
  application:
    name: seata-server



console:
  user:
    username: seata
    password: seata

seata:
  config:
    # support: nacos, consul, apollo, zk, etcd3
    type: nacos
    nacos:
      server-addr: 192.168.11.99:8848
      namespace: public
      group: DEFAULT_GROUP
      username: nacos
      password: nacos
      context-path:
      ##if use MSE Nacos with auth, mutex with username/password attribute
      #access-key:
      #secret-key:
      data-id: seataServer.properties
  registry:
    # support: nacos, eureka, redis, zk, consul, etcd3, sofa
    type: nacos
    nacos:
      application: seata-server
      server-addr: 192.168.11.99:8848
      group: DEFAULT_GROUP
      namespace: public
      cluster: default
      username: nacos
      password: nacos
      context-path:
      ##if use MSE Nacos with auth, mutex with username/password attribute
      #access-key:
      #secret-key:
#  server:
#    service-port: 8091 #If not configured, the default is '${server.port} + 1000'
  security:
    secretKey: SeataSecretKey0c382ef121d778043159209298fd40bf3850a017
    tokenValidityInMilliseconds: 1800000
    ignore:
      urls: /,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-fe/public/**,/api/v1/auth/login
logging:
  config: classpath:logback-spring.xml
  file:
    path: ${user.home}/logs/seata
  extend:
    logstash-appender:
      destination: 127.0.0.1:4560
    kafka-appender:
      bootstrap-servers: 127.0.0.1:9092
      topic: logback_to_logstash

4.3 Configure the Corresponding Settings in Nacos

Create a configuration in Nacos with data-id seataServer.properties and group DEFAULT_GROUP (matching seata.config.nacos above) containing the following content:

#For details about configuration items, see https://seata.io/zh-cn/docs/user/configurations.html
#Transport configuration, for client and server
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableTmClientBatchSendRequest=false
transport.enableRmClientBatchSendRequest=true
transport.enableTcServerBatchSendResponse=false
transport.rpcRmRequestTimeout=30000
transport.rpcTmRequestTimeout=30000
transport.rpcTcRequestTimeout=30000
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
transport.serialization=seata
transport.compressor=none

#Transaction routing rules configuration, only for the client
# the transaction group name ('mygroup' here) can be customized; it should match the client's seata.tx-service-group, or the client must define its own vgroup-mapping (the client config below uses my_test_tx_group)
service.vgroupMapping.mygroup=default
#If you use a registry, you can ignore it
service.default.grouplist=127.0.0.1:8091
service.enableDegrade=false
service.disableGlobalTransaction=false

#Transaction rule configuration, only for the client
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=true
client.rm.tableMetaCheckerInterval=60000
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.rm.sagaJsonParser=fastjson
client.rm.tccActionInterceptorOrder=-2147482648
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
client.tm.defaultGlobalTransactionTimeout=60000
client.tm.degradeCheck=false
client.tm.degradeCheckAllowTimes=10
client.tm.degradeCheckPeriod=2000
client.tm.interceptorOrder=-2147482648
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.undo.compress.enable=true
client.undo.compress.type=zip
client.undo.compress.threshold=64k
#For TCC transaction mode
tcc.fence.logTableName=tcc_fence_log
tcc.fence.cleanPeriod=1h

#Log rule configuration, for client and server
log.exceptionRate=100

#Transaction storage configuration, only for the server. The file, db, and redis configuration values are optional.
# the default is file; be sure to change this to db, otherwise our own services will fail to connect to seata when they start
store.mode=db
store.lock.mode=db
store.session.mode=db
#Used for password encryption

#These configurations are required if the `store mode` is `db`. If `store.mode,store.lock.mode,store.session.mode` are not equal to `db`, you can remove the configuration block.
# adjust the MySQL connection settings below for your environment
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://192.168.11.37:3306/seata?useUnicode=true&characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
store.db.user=root
store.db.password=root123
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.distributedLockTable=distributed_lock
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000


#Transaction rule configuration, only for the server
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
server.distributedLockExpireTime=10000
server.xaerNotaRetryTimeout=60000
server.session.branchAsyncQueueSize=5000
server.session.enableBranchAsyncRemove=false
server.enableParallelRequestHandle=false

#Metrics configuration, only for the server
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
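These properties can be created through the Nacos console (new configuration, data-id seataServer.properties, group DEFAULT_GROUP) or pushed programmatically with the Nacos Java client. A minimal sketch, assuming the content above is saved locally as seataServer.properties and the nacos-client dependency is on the classpath:

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;

// Pushes seataServer.properties to Nacos so that seata-server and the business
// services can read it; server address and credentials match the config above.
public class PublishSeataConfig {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("serverAddr", "192.168.11.99:8848");
        props.put("username", "nacos");
        props.put("password", "nacos");

        String content = Files.readString(Path.of("seataServer.properties"));

        ConfigService configService = NacosFactory.createConfigService(props);
        boolean published = configService.publishConfig(
                "seataServer.properties", "DEFAULT_GROUP", content);
        System.out.println("published: " + published);
    }
}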

4.4 Start the Server

Start the container with docker-compose up -d. Once it is up, you should see seata-server registered successfully in Nacos.

You can also open the Seata console (port 7091) directly to check.

5. Integrating Seata with Spring Cloud

Business scenario:

A user places an order; the whole flow is made up of two microservices:

  1. Stock service: deduct stock for the given product.

  2. Order service: create the order as requested.

5.1 Add Dependencies to pom.xml

        <dependency>
            <groupId>com.alibaba.cloud</groupId>
            <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba.cloud</groupId>
            <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>

        </dependency>
         <dependency>
            <groupId>org.mybatis.spring.boot</groupId>
            <artifactId>mybatis-spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-openfeign</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid-spring-boot-starter</artifactId>
        </dependency>

5.2 bootstrap.yml

logging:
  level:
    root: error
    com:
      beiyou: debug
    io:
      seata: info
server:
  port: 8080
spring:
  application:
    name: bm-order-service
  cloud:
    nacos:
      discovery:
        server-addr: 192.168.21.100:8848
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      driver-class-name: com.mysql.cj.jdbc.Driver # JDBC driver class used to connect to the database
      initial-size: 10 # number of connections created when the pool is initialized
      max-active: 100 # maximum number of active connections in the pool
      max-wait: 60000 # maximum wait time in milliseconds when no connection is available (60000 ms = 1 minute); an exception is thrown if a connection cannot be obtained in time
      min-idle: 10 # minimum number of idle connections kept in the pool; keeping at least 10 reduces the overhead of frequently creating and destroying connections
      username: root
      password: root123
      url: jdbc:mysql://192.168.21.37:3306/beimao_order?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&allowMultiQueries=true
seata:
  enabled: true
  enable-auto-data-source-proxy: true
  tx-service-group: my_test_tx_group # transaction group name; must correspond to the server-side configuration
  service:
    vgroup-mapping:
      my_test_tx_group: default # the key is the transaction group name; the value must match the cluster name on the server side
    disable-global-transaction: false
  config:
    type: nacos
    nacos:
      server-addr: 192.168.21.100:8848
      group: DEFAULT_GROUP
      namespace: # for the public namespace, leave this empty or omit it
      username: nacos
      password: nacos
      data-id: seataServer.properties
  registry:
    type: nacos
    nacos:
      server-addr: 192.168.21.100:8848
      application: seata-server
      namespace: # for the public namespace, leave this empty or omit it
      username: nacos
      password: nacos
      cluster: default
      group: DEFAULT_GROUP
  client:
    rm:
      report-success-enable: false

mybatis:
  mapperLocations: classpath:mappers/*.xml
  typeAliasesPackage: com.beiyou.model
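With seata.enable-auto-data-source-proxy: true (set above), Seata proxies the Druid DataSource automatically and no extra code is needed. For reference only, a rough sketch of the manual equivalent used when the automatic proxy is disabled (class and bean names are illustrative, not from the original):

import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

import javax.sql.DataSource;

// Only needed when the automatic data source proxy is disabled; otherwise Seata
// registers an equivalent proxy for you.
@Configuration
public class DataSourceProxyConfig {

    @Bean
    @ConfigurationProperties(prefix = "spring.datasource.druid")
    public DruidDataSource druidDataSource() {
        return new DruidDataSource();
    }

    @Bean
    @Primary
    public DataSource dataSource(DruidDataSource druidDataSource) {
        // The AT-mode RM intercepts SQL through this proxy to record undo_log images.
        return new DataSourceProxy(druidDataSource);
    }
}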

5.3 Start the Services

Our own services must be in the same Nacos namespace as seata-server; check that all of them have started successfully.

5.4 Usage

5.4.1 Prepare the Data Tables

Add the following table to the beimao_stock (stock) database:

CREATE TABLE `inventory` (
  `id` int NOT NULL AUTO_INCREMENT COMMENT '唯一id',
  `productId` int NOT NULL COMMENT '商品Id',
  `qty` int DEFAULT '0' COMMENT '库存数量',
  PRIMARY KEY (`id`)
) 

Add the following tables to the beimao_order database:

CREATE TABLE `order_master` (
  `id` bigint NOT NULL COMMENT '订单id',
  `name` varchar(100) DEFAULT NULL COMMENT '用户名',
  `phone` varchar(11) DEFAULT NULL COMMENT '手机号',
  PRIMARY KEY (`id`)
);

CREATE TABLE `order_item` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT '唯一id',
  `orderId` bigint NOT NULL COMMENT '订单id',
  `productName` varchar(100) DEFAULT NULL COMMENT '商品名称',
  `productId` int DEFAULT NULL COMMENT '商品id',
  `qty` int DEFAULT NULL COMMENT '购买数量',
  PRIMARY KEY (`id`)
) 

The corresponding MyBatis mapper interfaces:

@Mapper
public interface OrderMasterDao {

    @Insert("insert into order_master (id,name,phone) values (#{id},#{name},#{phone})")
    Integer insert(Ordering ordering);
}
@Mapper
public interface OrderItemDao {

    @Insert("insert into order_item (orderId,productName,productId,qty) values (#{orderId},#{productName},#{productId},#{qty})")
    Integer insert(OrderingProduct ordering);
}

@Mapper
public interface InventoryDao {

    // multiple scalar parameters need @Param so MyBatis can bind #{productId} and #{qty}
    @Update("update inventory set qty = qty - #{qty} where productId = #{productId}")
    Integer deduct(@Param("productId") Integer productId, @Param("qty") Integer qty);
}
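The order service calls the stock service through an OpenFeign client (referred to as storageService in the next step). A minimal sketch, assuming the stock service is registered in Nacos as bm-stock-service and exposes a deduct endpoint; the service name and path are assumptions, not from the original:

import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;

// Hypothetical Feign client for the stock service. The interceptor added by
// spring-cloud-starter-alibaba-seata propagates the global transaction XID on
// this call, so the remote stock deduction joins the same global transaction.
@FeignClient(name = "bm-stock-service")
public interface StorageService {

    @GetMapping("/stock/deduct/{qty}")
    String deduct(@PathVariable("qty") Integer qty);
}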

5.4.2 Add the @GlobalTransactional Annotation to the Outermost Method

 
    @GlobalTransactional(name = "createOrder")
    public String ordering() {
        // local branch: insert the order into the order database
        orderDao.insert();
        // remote branch: call the stock service through Feign to deduct stock
        storageService.deduct(2);
        return "ok";
    }
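On the stock service side, the endpoint can simply delegate to InventoryDao.deduct; a minimal sketch under the same assumptions (the fixed productId is illustrative):

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical controller in the stock service. The local update it performs is
// registered with the TC as a branch transaction under the propagated XID, and
// the before/after images are written to this database's undo_log table.
@RestController
public class StockController {

    private final InventoryDao inventoryDao;

    public StockController(InventoryDao inventoryDao) {
        this.inventoryDao = inventoryDao;
    }

    @GetMapping("/stock/deduct/{qty}")
    public String deduct(@PathVariable("qty") Integer qty) {
        inventoryDao.deduct(2, qty); // productId 2 is hard-coded for this demo (assumption)
        return "ok";
    }
}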

5.5 Verify the Result

Testing (details omitted).

Success case: simulate a normal order and stock deduction and confirm that both the order and stock databases are updated.

Failure case: simulate a failure after the order insert and the stock deduction have succeeded, and check that the whole global transaction rolls back; see the sketch below.
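A minimal way to exercise the failure path is to throw an unchecked exception after both steps have run; the flag below is purely illustrative. The TM then reports the global transaction as failed and the TC drives branch rollback using each database's undo_log:

    @GlobalTransactional(name = "createOrder")
    public String ordering() {
        orderDao.insert();
        storageService.deduct(2);
        // Simulated business failure: the RuntimeException makes Seata roll back the
        // global transaction, so both the order insert and the stock deduction are undone.
        boolean simulateFailure = true; // illustrative switch, not from the original
        if (simulateFailure) {
            throw new RuntimeException("simulated failure, expecting global rollback");
        }
        return "ok";
    }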

6. How It Works (AT Mode)

1. A long global transaction is split into multiple short local (branch) transactions.

2. Each business database has its own undo_log table, which stores before and after images of the rows touched by the business SQL. If a rollback is needed, the undo_log data is used to restore the original values; after a successful global commit, the obsolete undo_log records are deleted asynchronously.

Advantages

Resources are locked only for a short time, so throughput is high, and transactions spanning multiple databases are supported.
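A rough, purely conceptual sketch of what a phase-two rollback does with one undo_log record (these are not Seata's real classes; in practice the images live in the rollback_info column and the restore is a compensating UPDATE):

import java.util.HashMap;
import java.util.Map;

// Illustrative only: a simplified model of AT-mode rollback, not Seata's internal API.
public class UndoLogSketch {

    record RowImage(Map<String, Object> columns) {}

    record UndoRecord(String table, RowImage beforeImage, RowImage afterImage) {}

    static boolean rollback(UndoRecord undo, Map<String, Object> currentRow) {
        // The current row is compared with the after image captured when the branch
        // committed; if it was changed outside the global transaction in the meantime,
        // automatic rollback is not safe and manual handling is required.
        if (!currentRow.equals(undo.afterImage().columns())) {
            return false;
        }
        // Restore the before image (Seata issues a compensating UPDATE for this).
        currentRow.clear();
        currentRow.putAll(undo.beforeImage().columns());
        return true;
    }

    public static void main(String[] args) {
        Map<String, Object> row = new HashMap<>(Map.of("id", 1, "qty", 8));
        UndoRecord undo = new UndoRecord("inventory",
                new RowImage(Map.of("id", 1, "qty", 10)),  // before the business SQL
                new RowImage(Map.of("id", 1, "qty", 8)));  // after the business SQL
        System.out.println(rollback(undo, row)); // true: qty restored from 8 back to 10
    }
}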

Tables involved:

TC (seata server) database:

global_table: the global transaction record, keyed by XID

branch_table: information about each branch transaction

lock_table: the rows currently locked (row locks)

Business databases:

undo_log: before/after images used for rollback
