Integrating Seata with Spring Boot

This article covers deploying the Seata distributed transaction middleware and integrating it into a Spring Boot application: preparing the databases, adjusting the configuration files, starting Zookeeper, starting the Seata Server, and adding the client-side configuration and dependencies.

1 Deploying the Seata Server

1.1 Create the seata-server database

In db store mode the server persists sessions and locks in three tables: global_table, branch_table, and lock_table. Create a seata_server database and run the script below:

-- the table to store GlobalSession data
drop table if exists `global_table`;
create table `global_table` (
  `xid` varchar(128) not null,
  `transaction_id` bigint,
  `status` tinyint not null,
  `application_id` varchar(64),
  `transaction_service_group` varchar(64),
  `transaction_name` varchar(128),
  `timeout` int,
  `begin_time` bigint,
  `application_data` varchar(2000),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`xid`),
  key `idx_gmt_modified_status` (`gmt_modified`, `status`),
  key `idx_transaction_id` (`transaction_id`)
);

-- the table to store BranchSession data
drop table if exists `branch_table`;
create table `branch_table` (
  `branch_id` bigint not null,
  `xid` varchar(128) not null,
  `transaction_id` bigint,
  `resource_group_id` varchar(128),
  `resource_id` varchar(256),
  `lock_key` varchar(256),
  `branch_type` varchar(8),
  `status` tinyint,
  `client_id` varchar(64),
  `application_data` varchar(2000),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`branch_id`),
  key `idx_xid` (`xid`)
);

-- the table to store lock data
drop table if exists `lock_table`;
create table `lock_table` (
  `row_key` varchar(128) not null,
  `xid` varchar(128),
  `transaction_id` bigint,
  `branch_id` bigint,
  `resource_id` varchar(256),
  `table_name` varchar(64),
  `pk` varchar(128),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`row_key`)
);
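
These three tables belong to the seata_server database used by the server itself. AT mode additionally requires an undo_log table in every business database that takes part in a global transaction. The sketch below follows the schema shipped with Seata 1.3; check the SQL script bundled with your release for the authoritative version:

-- the table to store seata undo log, created in each business database
create table if not exists `undo_log` (
  `id` bigint(20) not null auto_increment,
  `branch_id` bigint(20) not null,
  `xid` varchar(100) not null,
  `context` varchar(128) not null,
  `rollback_info` longblob not null,
  `log_status` int(11) not null,
  `log_created` datetime not null,
  `log_modified` datetime not null,
  primary key (`id`),
  unique key `ux_undo_log` (`xid`, `branch_id`)
) engine = InnoDB default charset = utf8;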

1.2 Modify file.conf

Switch the transaction log store to db mode and point it at the seata_server database created above:

## transaction log store, only used in seata-server
store {
  ## store mode: file、db、redis
  mode = "db"

  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp)/HikariDataSource(hikari) etc.
    datasource = "druid"
    ## mysql/oracle/postgresql/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://127.0.0.1:3306/seata_server"
    user = "root"
    password = ""
    minConn = 5
    maxConn = 30
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
    maxWait = 5000
  }

}
lock {
    ## the data row lock store mode: local_db、memory or db
    mode = "db"

    memory{
         ## store lock in memory of server
    }

    db{
        ## use db of server to store lock, the db is ${store.db.url}
        lock-table= "lock_table"
    }

}

1.3 Modify registry.conf

The server registers itself with Zookeeper; its own configuration still comes from file.conf:

registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "zk"
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"
  file {
    name = "file.conf"
  }
}

1.4 Start Zookeeper
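
The registry configured above expects Zookeeper at 127.0.0.1:2181. Assuming a stock Apache Zookeeper distribution, a standalone instance can be started from its home directory with:

bin/zkServer.sh start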

1.5 Start the Seata Server

Linux/Unix/Mac

sh seata-server.sh $LISTEN_PORT $STORE_MODE $IP (the $IP parameter is optional)

Windows

cmd seata-server.bat $LISTEN_PORT $STORE_MODE $IP (the $IP parameter is optional)
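
For example, to listen on the default port 8091 with the db store mode configured above:

sh seata-server.sh 8091 db

Recent 1.x releases also accept flag-style arguments (for example sh seata-server.sh -p 8091 -m db); check the script shipped with your release.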

2 Application Integration

2.1 Add configuration

Note the pairing of seata.tx-service-group = pms with seata.service.vgroup-mapping.pms = default below: the application's transaction service group is mapped to the "default" cluster name under which the server registered itself in Zookeeper.

# =========================Seata Config===============================
seata.enabled = true
seata.enable-auto-data-source-proxy = true
seata.application-id = hisaggre-api
seata.tx-service-group = pms
seata.client.rm-report-success-enable = true
seata.client.rm-table-meta-check-enable = false
seata.client.rm-report-retry-count = 5
seata.client.rm-async-commit-buffer-limit = 10000
seata.client.rm.lock.lock-retry-internal = 10
seata.client.rm.lock.lock-retry-times = 30
seata.client.rm.lock.lock-retry-policy-branch-rollback-on-conflict = true
seata.client.tm-commit-retry-count = 3
seata.client.tm-rollback-retry-count = 3
seata.client.undo.undo-data-validation = true
seata.client.undo.undo-log-serialization = jackson
seata.client.undo.undo-log-table = undo_log
seata.client.log.exceptionRate = 100
seata.client.support.spring.datasource-autoproxy = true
seata.service.vgroup-mapping.pms = default
seata.service.enable-degrade = false
seata.service.disable-global-transaction = false
seata.service.grouplist.default = 127.0.0.1:8091
seata.transport.shutdown.wait = 3
seata.transport.thread-factory.boss-thread-prefix = NettyBoss
seata.transport.thread-factory.worker-thread-prefix = NettyServerNIOWorker
seata.transport.thread-factory.server-executor-thread-prefix = NettyServerBizHandler
seata.transport.thread-factory.share-boss-worker = false
seata.transport.thread-factory.client-selector-thread-prefix = NettyClientSelector
seata.transport.thread-factory.client-selector-thread-size = 1
seata.transport.thread-factory.client-worker-thread-prefix = NettyClientWorkerThread
seata.transport.type = TCP
seata.transport.server = NIO
seata.transport.heartbeat = true
seata.transport.serialization = seata
seata.transport.compressor = none
seata.transport.enable-client-batch-send-request = true
seata.registry.file.name = file.conf
seata.registry.type = zk
seata.config.file.name = file.conf
seata.config.type = file

2.2 Add dependencies

Add the following to the core module's build.gradle (zkclient is required because the registry type is zk; on current Gradle versions use implementation rather than the deprecated compile):

compile("io.seata:seata-spring-boot-starter:1.3.0")
compile("com.101tec:zkclient:0.10")

2.3 Propagating the xid (the global transaction id)

2.3.1 Add an XidInterceptor

The upstream service sends the xid as an HTTP header; this interceptor binds it to the handling thread so that local branch transactions join the caller's global transaction:

import io.seata.core.context.RootContext;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Component;
import org.springframework.web.servlet.HandlerInterceptor;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

@Component
public class XidInterceptor implements HandlerInterceptor {

    /**
     * Invoked before the controller method: bind the xid passed by the
     * upstream service to the current thread.
     */
    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
        String xid = request.getHeader("xid");
        if (StringUtils.isNotEmpty(xid)) {
            RootContext.bind(xid);
        }
        return true;
    }

    /**
     * Invoked after the request completes: unbind the xid so it does not
     * leak into the next request served by the same container thread.
     */
    @Override
    public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) {
        if (RootContext.inGlobalTransaction()) {
            RootContext.unbind();
        }
    }
}

2.3.2 Configure WebConfig

Register the interceptor for all paths, excluding the login endpoint and static resources:

@Configuration
@EnableSpringDataWebSupport
public class WebConfig implements WebMvcConfigurer {

    // ... some code omitted ...

    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        // register the XidInterceptor
        InterceptorRegistration registration = registry.addInterceptor(new XidInterceptor());
        registration.addPathPatterns("/**");              // intercept all paths
        registration.excludePathPatterns(                 // paths not to intercept
                "your login path",                        // login
                "/**/*.html",                             // static html
                "/**/*.js",                               // static js
                "/**/*.css",                              // static css
                "/**/*.woff",
                "/**/*.ttf"
        );
    }
}
2.3.3 Custom HystrixConcurrencyStrategy

Hystrix commands execute on Hystrix-managed thread pools, so an xid bound to the calling thread would otherwise be lost; wrapping the Callable carries it across the thread boundary:

import com.netflix.hystrix.strategy.concurrency.HystrixConcurrencyStrategy;
import io.seata.core.context.RootContext;

import java.util.concurrent.Callable;

public class XidHystrixConcurrencyStrategy extends HystrixConcurrencyStrategy {

    @Override
    public <T> Callable<T> wrapCallable(Callable<T> callable) {
        // capture the xid of the calling thread at submission time
        return new XidAwareCallable<>(callable, RootContext.getXID());
    }

    static class XidAwareCallable<T> implements Callable<T> {

        private final Callable<T> delegate;
        private final String xid;

        XidAwareCallable(Callable<T> callable, String xid) {
            this.delegate = callable;
            this.xid = xid;
        }

        @Override
        public T call() throws Exception {
            // rebind the captured xid on the Hystrix worker thread
            if (xid != null) {
                RootContext.bind(xid);
            }
            try {
                return delegate.call();
            } finally {
                // unbind so the pooled thread does not leak the xid into the next command
                if (xid != null) {
                    RootContext.unbind();
                }
            }
        }
    }
}

2.3.4 Configure HystrixConfig

HystrixPlugins lets each plugin be registered only once, so the config captures the currently registered plugins, resets, and re-registers them together with the xid-aware concurrency strategy:

import com.netflix.hystrix.contrib.javanica.aop.aspectj.HystrixCommandAspect;
import com.netflix.hystrix.strategy.HystrixPlugins;
import com.netflix.hystrix.strategy.concurrency.HystrixConcurrencyStrategy;
import com.netflix.hystrix.strategy.eventnotifier.HystrixEventNotifier;
import com.netflix.hystrix.strategy.executionhook.HystrixCommandExecutionHook;
import com.netflix.hystrix.strategy.metrics.HystrixMetricsPublisher;
import com.netflix.hystrix.strategy.properties.HystrixPropertiesStrategy;
import com.xingren.v.logging.annotations.Slf4j;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.annotation.PostConstruct;

@Configuration
@Slf4j
public class HystrixConfig {

    @Bean
    public HystrixCommandAspect hystrixAspect() {
        return new HystrixCommandAspect();
    }

    @PostConstruct
    public void init() {
        try {
            HystrixConcurrencyStrategy target = new XidHystrixConcurrencyStrategy();
            HystrixConcurrencyStrategy strategy = HystrixPlugins.getInstance().getConcurrencyStrategy();
            if (strategy instanceof XidHystrixConcurrencyStrategy) {
                // Welcome to singleton hell...
                return;
            }
            HystrixCommandExecutionHook commandExecutionHook = HystrixPlugins
                    .getInstance().getCommandExecutionHook();
            HystrixEventNotifier eventNotifier = HystrixPlugins.getInstance()
                    .getEventNotifier();
            HystrixMetricsPublisher metricsPublisher = HystrixPlugins.getInstance()
                    .getMetricsPublisher();
            HystrixPropertiesStrategy propertiesStrategy = HystrixPlugins.getInstance()
                    .getPropertiesStrategy();


            HystrixPlugins.reset();
            HystrixPlugins.getInstance().registerConcurrencyStrategy(target);
            HystrixPlugins.getInstance()
                    .registerCommandExecutionHook(commandExecutionHook);
            HystrixPlugins.getInstance().registerEventNotifier(eventNotifier);
            HystrixPlugins.getInstance().registerMetricsPublisher(metricsPublisher);
            HystrixPlugins.getInstance().registerPropertiesStrategy(propertiesStrategy);
        } catch (Exception e) {
            log.error("Failed to register the Seata xid Hystrix concurrency strategy", e);
        }
    }
}

2.3.5 Put the xid into request headers before remote calls

Using the SDK-style client as an example:


import com.xingren.org.springframework.security.oauth2.common.OAuth2AccessToken;
import com.xingren.reaper.interceptors.ImplicitAccessTokenInterceptor;
import feign.RequestTemplate;
import io.seata.core.context.RootContext;
import okhttp3.Request;
import okhttp3.Response;
import org.apache.commons.lang3.StringUtils;

import javax.validation.constraints.NotNull;
import java.io.IOException;

public class XidInterceptor extends ImplicitAccessTokenInterceptor {

    public XidInterceptor(@NotNull String baseUrl,
                          @NotNull String clientId) {
        super(baseUrl, clientId);
    }

    public Response intercept(Chain chain) throws IOException {
        OAuth2AccessToken token = this.getToken();
        Request.Builder builder = chain.request().newBuilder()
                .removeHeader("Authorization")
                .addHeader("Authorization", "Bearer " + token.getValue());
        // only propagate the xid when a global transaction is active;
        // okhttp rejects null header values
        String xid = RootContext.getXID();
        if (StringUtils.isNotEmpty(xid)) {
            builder.removeHeader("xid").addHeader("xid", xid);
        }
        return chain.proceed(builder.build());
    }

    public void apply(RequestTemplate requestTemplate) {
        OAuth2AccessToken token = this.getToken();
        requestTemplate.header("Authorization", "Bearer " + token.getValue());
        String xid = RootContext.getXID();
        if (StringUtils.isNotEmpty(xid)) {
            requestTemplate.header("xid", xid);
        }
    }
}


Then register the interceptor when building the SDK client:

@Bean
public PurchaseConfigClient purchaseConfigClient() {
    return new PurchaseConfigClient(_pmsClientConfig(), _pmsHystrixCommandConfig(), _pmsHystrixThreadConfig());
}

private PmsClientConfig _pmsClientConfig() {
    PmsClientConfig config = new PmsClientConfig(properties.getHost());
    config.addApplicationInterceptor(new XidInterceptor(reaperProperties.getHost(),
            reaperProperties.getClientId()));
    return config;
}
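
ImplicitAccessTokenInterceptor is an internal SDK class. For services that call their peers through plain Spring Cloud OpenFeign clients instead, the same idea can be expressed as a standalone feign.RequestInterceptor; a minimal sketch (the class name XidFeignInterceptor is illustrative, not part of Seata):

import feign.RequestInterceptor;
import feign.RequestTemplate;
import io.seata.core.context.RootContext;
import org.apache.commons.lang3.StringUtils;

public class XidFeignInterceptor implements RequestInterceptor {

    @Override
    public void apply(RequestTemplate template) {
        // propagate the current global transaction id, if any, downstream
        String xid = RootContext.getXID();
        if (StringUtils.isNotEmpty(xid)) {
            template.header("xid", xid);
        }
    }
}

Exposed as a @Bean, it is applied to every Feign client in the application.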

2.4 Enable the datasource proxy

If the project already wraps its DataSource in a proxy of its own, Seata's auto-proxying can recurse endlessly; exclude such proxy classes with excludes:

@SpringBootApplication
@ComponentScan("com.xingren")
@EnableFeignClients("com.xingren")
@EnableAutoDataSourceProxy(excludes = { "org.springframework.jdbc.datasource.TransactionAwareDataSourceProxy" })
public class ApiApplication {
    public static void main(String[] args) {
        SpringApplication.run(ApiApplication.class, args);
    }
}
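
With the datasource proxy in place, the last step is opening a global transaction at the business entry point with @GlobalTransactional, provided by the seata-spring-boot-starter added earlier. A minimal sketch, assuming a hypothetical OrderService:

import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

@Service
public class OrderService {

    // starts a global transaction: local writes through the proxied datasource
    // and remote calls that carry the xid header commit or roll back together
    @GlobalTransactional(name = "create-order", timeoutMills = 60000)
    public void createOrder(Long skuId, int count) {
        // 1. local insert via the proxied DataSource
        // 2. remote call whose interceptor propagates the xid
    }
}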