Note: with seata-spring-boot-starter there is no need to configure the data source proxy manually, nor to add file.conf and registry.conf files to each microservice.
- Step 1: create the seata database, and add the undo_log transaction rollback log table to every business database that participates in Seata
- Script for the seata database
CREATE TABLE IF NOT EXISTS `global_table`
(
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`status` TINYINT NOT NULL,
`application_id` VARCHAR(32),
`transaction_service_group` VARCHAR(32),
`transaction_name` VARCHAR(128),
`timeout` INT,
`begin_time` BIGINT,
`application_data` VARCHAR(2000),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`xid`),
KEY `idx_gmt_modified_status` (`gmt_modified`, `status`),
KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store BranchSession data
CREATE TABLE IF NOT EXISTS `branch_table`
(
`branch_id` BIGINT NOT NULL,
`xid` VARCHAR(128) NOT NULL,
`transaction_id` BIGINT,
`resource_group_id` VARCHAR(32),
`resource_id` VARCHAR(256),
`branch_type` VARCHAR(8),
`status` TINYINT,
`client_id` VARCHAR(64),
`application_data` VARCHAR(2000),
`gmt_create` DATETIME(6),
`gmt_modified` DATETIME(6),
PRIMARY KEY (`branch_id`),
KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
-- the table to store lock data
CREATE TABLE IF NOT EXISTS `lock_table`
(
`row_key` VARCHAR(128) NOT NULL,
`xid` VARCHAR(96),
`transaction_id` BIGINT,
`branch_id` BIGINT NOT NULL,
`resource_id` VARCHAR(256),
`table_name` VARCHAR(32),
`pk` VARCHAR(36),
`gmt_create` DATETIME,
`gmt_modified` DATETIME,
PRIMARY KEY (`row_key`),
KEY `idx_branch_id` (`branch_id`)
) ENGINE = InnoDB
DEFAULT CHARSET = utf8;
- Script for the undo_log table
CREATE TABLE `undo_log` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`branch_id` bigint(20) NOT NULL,
`xid` varchar(100) NOT NULL,
`context` varchar(128) NOT NULL,
`rollback_info` longblob NOT NULL,
`log_status` int(11) NOT NULL,
`log_created` datetime NOT NULL,
`log_modified` datetime NOT NULL,
`ext` varchar(100) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='transaction rollback log table';
- Step 2: download seata-server-1.2.0 and nacos-server-1.2.1 from the official sites
- Step 3: modify the configuration files under seata-server-1.2.0\seata\conf
- Modify file.conf: change store.mode to db, then update the db connection settings
## transaction log store, only used in seata-server
store {
  ## store mode: file、db
  mode = "db"
  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 16384
    # globe session size , if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size , if exceeded allocate new buffer
    fileWriteBufferCacheSize = 16384
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }
  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/postgresql/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.cj.jdbc.Driver"
    url = "jdbc:mysql://127.0.0.1:3306/seata?useUnicode=true&characterEncoding=UTF-8"
    user = "root"
    password = "123456"
    minConn = 5
    maxConn = 30
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
    maxWait = 5000
  }
}
- Modify registry.conf: change registry.type and config.type to nacos, and fill in your own nacos-server address
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "nacos"
  nacos {
    application = "seata-server"
    serverAddr = "localhost:8848"
    namespace = "public"
    cluster = "default"
    username = ""
    password = ""
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = 0
    password = ""
    cluster = "default"
    timeout = 0
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "nacos"
  nacos {
    serverAddr = "localhost:8848"
    namespace = "public"
    group = "SEATA_GROUP"
    username = ""
    password = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    appId = "seata-server"
    apolloMeta = "http://192.168.1.204:8801"
    namespace = "application"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}
- Step 4: since Nacos is used as the configuration center, push the Seata configuration (config.txt) to nacos-server
- Modify the db settings in config.txt; the file is located in the source tree at seata\script\config-center
Change store.mode=db, store.db.driverClassName, store.db.url, store.db.user and store.db.password
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
service.vgroupMapping.my_test_tx_group=default
service.default.grouplist=127.0.0.1:8091
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=false
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
store.mode=db
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.sessionReloadReadSize=100
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.cj.jdbc.Driver
store.db.url=jdbc:mysql://127.0.0.1:3306/seata?useUnicode=true
store.db.user=root
store.db.password=123456
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
client.undo.dataValidation=true
client.undo.logSerialization=jackson
client.undo.onlyCareUpdateColumns=true
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
- Run the initialization script nacos-config.sh, located in the source tree at seata\script\config-center\nacos (a verification sketch follows below)
sh nacos-config.sh <nacos-server address>
sh nacos-config.sh 127.0.0.1
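If you want to confirm the push succeeded before starting the servers, each line of config.txt is stored as a separate dataId under group SEATA_GROUP. A minimal sketch using the Nacos Java client, assuming a local nacos-server on the default port and the default public namespace (the class name and the spot-checked key are only examples):
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;

public class SeataConfigCheck {

    public static void main(String[] args) throws NacosException {
        // connect to the nacos-server the script pushed to (assumed local, default port)
        ConfigService configService = NacosFactory.createConfigService("127.0.0.1:8848");
        // every line of config.txt becomes one dataId in group SEATA_GROUP
        String value = configService.getConfig("service.vgroupMapping.my_test_tx_group", "SEATA_GROUP", 5000);
        System.out.println("service.vgroupMapping.my_test_tx_group = " + value); // expected: default
    }
}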
- Step 5: start seata-server and nacos-server. If no errors are reported during startup, they started successfully.
- Step 6: check whether seata-server has registered with Nacos successfully (besides the console, the sketch below shows a programmatic check).
From now on, configuration changes can be made directly in Nacos.
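A sketch of such a programmatic check with the Nacos Java client, assuming a local nacos-server and that registry.conf kept application = "seata-server" (the class name is just an example):
import com.alibaba.nacos.api.exception.NacosException;
import com.alibaba.nacos.api.naming.NamingFactory;
import com.alibaba.nacos.api.naming.NamingService;
import com.alibaba.nacos.api.naming.pojo.Instance;

import java.util.List;

public class SeataRegistrationCheck {

    public static void main(String[] args) throws NacosException {
        // the service name comes from registry.nacos.application in registry.conf
        NamingService namingService = NamingFactory.createNamingService("127.0.0.1:8848");
        List<Instance> instances = namingService.getAllInstances("seata-server");
        // an empty list means seata-server did not register; otherwise each instance prints ip:port (8091 by default)
        instances.forEach(i -> System.out.println(i.getIp() + ":" + i.getPort()));
    }
}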
- Step 7: modify the pom.xml of every microservice that participates in Seata
Add the Seata dependencies
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-nacos-config</artifactId>
</dependency>
<!-- distributed transaction management -->
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-alibaba-seata</artifactId>
    <version>${alibaba.cloud.version}</version>
    <exclusions>
        <exclusion>
            <groupId>io.seata</groupId>
            <artifactId>seata-spring-boot-starter</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>io.seata</groupId>
    <artifactId>seata-spring-boot-starter</artifactId>
    <version>1.2.0</version>
</dependency>
- Step 8: modify the application.yml of every microservice that participates in Seata
Add the Seata configuration; the template file is located in the source tree at seata\script\client\spring
Trim it down to what you actually need; here the whole thing is copied over
#================ seata config =======================
seata:
  enabled: true
  application-id: ${spring.application.name}
  tx-service-group: my_test_tx_group
  enable-auto-data-source-proxy: true
  use-jdk-proxy: false
  excludes-for-auto-proxying: firstClassNameForExclude,secondClassNameForExclude
  client:
    rm:
      async-commit-buffer-limit: 1000
      report-retry-count: 5
      table-meta-check-enable: false
      report-success-enable: false
      saga-branch-register-enable: false
      lock:
        retry-interval: 10
        retry-times: 30
        retry-policy-branch-rollback-on-conflict: true
    tm:
      commit-retry-count: 5
      rollback-retry-count: 5
    undo:
      data-validation: true
      log-serialization: jackson
      log-table: undo_log
      only-care-update-columns: true
    log:
      exceptionRate: 100
  service:
    vgroupMapping:
      my_test_tx_group: default
    grouplist:
      default: 127.0.0.1:8091
    enable-degrade: false
    disable-global-transaction: false
  transport:
    shutdown:
      wait: 3
    thread-factory:
      boss-thread-prefix: NettyBoss
      worker-thread-prefix: NettyServerNIOWorker
      server-executor-thread-prefix: NettyServerBizHandler
      share-boss-worker: false
      client-selector-thread-prefix: NettyClientSelector
      client-selector-thread-size: 1
      client-worker-thread-prefix: NettyClientWorkerThread
      worker-thread-size: default
      boss-thread-size: 1
    type: TCP
    server: NIO
    heartbeat: true
    serialization: seata
    compressor: none
    enable-client-batch-send-request: true
  config:
    # use Nacos as the configuration center
    type: nacos
    nacos:
      namespace: public
      serverAddr: 127.0.0.1:8848
      group: SEATA_GROUP
      userName: ""
      password: ""
  registry:
    # use Nacos as the registry, so clients discover seata-server through Nacos
    type: nacos
    nacos:
      application: seata-server
      server-addr: 127.0.0.1:8848
      namespace: public
      cluster: default
      userName: ""
      password: ""
- Step 9: add exclude = {DataSourceAutoConfiguration.class} to the Application class of every microservice that participates in Seata
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;

@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class})
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}
- Step 10: add the interceptors that propagate the distributed transaction xid
SeataHandlerInterceptor
import io.seata.core.context.RootContext;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class SeataHandlerInterceptor extends HandlerInterceptorAdapter {

    /**
     * Pre-handle: bind the global transaction id (xid) passed in the request header
     * to the current thread's transaction context.
     *
     * @param request  HttpServletRequest
     * @param response HttpServletResponse
     * @param handler  handler
     * @return whether to continue the handler chain
     * @throws Exception exception
     */
    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws Exception {
        String currentXid = RootContext.getXID();
        String globalXid = request.getHeader(RootContext.KEY_XID);
        if (StringUtils.isBlank(currentXid) && StringUtils.isNotBlank(globalXid)) {
            RootContext.bind(globalXid);
        }
        return true;
    }

    /**
     * After completion: unbind the xid that was bound in preHandle.
     *
     * @param request  HttpServletRequest
     * @param response HttpServletResponse
     * @param handler  handler
     * @param ex       exception
     * @throws Exception exception
     */
    @Override
    public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) throws Exception {
        String globalXid = request.getHeader(RootContext.KEY_XID);
        if (StringUtils.isBlank(globalXid)) {
            return;
        }
        String unbindXid = RootContext.unbind();
        // the xid was changed during the transaction; restore it
        if (!globalXid.equalsIgnoreCase(unbindXid)) {
            RootContext.bind(unbindXid);
        }
    }
}
SeataRestTemplateInterceptor
import io.seata.core.context.RootContext;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.springframework.http.HttpRequest;
import org.springframework.http.client.ClientHttpRequestExecution;
import org.springframework.http.client.ClientHttpRequestInterceptor;
import org.springframework.http.client.ClientHttpResponse;
import org.springframework.http.client.support.HttpRequestWrapper;

import java.io.IOException;

@Log4j2
public class SeataRestTemplateInterceptor implements ClientHttpRequestInterceptor {

    /**
     * RestTemplate request interceptor: put the global transaction id (xid)
     * into the outgoing request headers.
     *
     * @param request   request
     * @param body      request body bytes
     * @param execution ClientHttpRequestExecution
     * @return ClientHttpResponse
     * @throws IOException exception
     */
    @Override
    public ClientHttpResponse intercept(HttpRequest request, byte[] body, ClientHttpRequestExecution execution) throws IOException {
        HttpRequestWrapper requestWrapper = new HttpRequestWrapper(request);
        String xid = RootContext.getXID();
        if (StringUtils.isNotBlank(xid)) {
            requestWrapper.getHeaders().add(RootContext.KEY_XID, xid);
            log.info("distributed transaction xid: {}", xid);
        }
        return execution.execute(requestWrapper, body);
    }
}
SeataRestTemplateConfig
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.ClientHttpRequestInterceptor;
import org.springframework.web.client.RestTemplate;

import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

@Configuration
public class SeataRestTemplateConfig {

    @Autowired(required = false)
    private Collection<RestTemplate> restTemplates;

    public SeataRestTemplateConfig(Collection<RestTemplate> restTemplates) {
        this.restTemplates = restTemplates;
    }

    public SeataRestTemplateConfig() {
    }

    public SeataRestTemplateInterceptor seataRestTemplateInterceptor() {
        return new SeataRestTemplateInterceptor();
    }

    /**
     * Add the xid-propagating interceptor to every RestTemplate bean in the context.
     */
    @PostConstruct
    public void init() {
        if (this.restTemplates != null && !this.restTemplates.isEmpty()) {
            for (RestTemplate restTemplate : this.restTemplates) {
                List<ClientHttpRequestInterceptor> interceptors = new ArrayList<>(restTemplate.getInterceptors());
                interceptors.add(this.seataRestTemplateInterceptor());
                restTemplate.setInterceptors(interceptors);
            }
        }
    }
}
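The config above only decorates RestTemplate beans that already exist in the Spring context; if your service does not define one yet (OrderServiceImpl further below autowires one), a minimal bean is enough. A sketch, with the class name chosen only as an example:
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.client.RestTemplate;

@Configuration
public class RestTemplateConfig {

    // a plain RestTemplate bean; SeataRestTemplateConfig adds the xid interceptor to it at startup
    @Bean
    public RestTemplate restTemplate() {
        return new RestTemplate();
    }
}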
SeataFeignClientInterceptor
import feign.RequestInterceptor;
import feign.RequestTemplate;
import io.seata.core.context.RootContext;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;

@Log4j2
public class SeataFeignClientInterceptor implements RequestInterceptor {

    /**
     * Feign request interceptor: put the global transaction id (xid) into the outgoing request headers.
     */
    @Override
    public void apply(RequestTemplate requestTemplate) {
        String xid = RootContext.getXID();
        if (StringUtils.isNotBlank(xid)) {
            requestTemplate.header(RootContext.KEY_XID, xid);
            log.info("distributed transaction xid: {}", xid);
        }
    }
}
WebConfiguration
import feign.RequestInterceptor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurationSupport;
import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter;

/**
 * Note: only one WebMvcConfigurationSupport may exist in a project; if there are several, only one of them takes effect.
 */
@Configuration
public class WebConfiguration extends WebMvcConfigurationSupport {

    @Autowired
    private RequestMappingHandlerAdapter handlerAdapter;

    @Bean
    public RequestInterceptor requestInterceptor() {
        return new SeataFeignClientInterceptor();
    }

    @Override
    protected void addInterceptors(InterceptorRegistry registry) {
        registry.addInterceptor(new SeataHandlerInterceptor()).addPathPatterns("/**");
        super.addInterceptors(registry);
    }
}
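The RequestInterceptor bean registered above is picked up automatically by Spring Cloud OpenFeign, so Feign calls carry the xid header as well. The sample services below use RestTemplate, but a Feign call to the participant would look roughly like this hypothetical interface (it assumes spring-cloud-starter-openfeign is on the classpath and @EnableFeignClients is enabled; the service name, path and the project types ResultInfo/OrderApportionDto are placeholders taken from the samples below):
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;

// hypothetical client for the apportion/payment service; the xid header is added by SeataFeignClientInterceptor
@FeignClient(name = "pay-service")
public interface OrderApportionClient {

    @PostMapping("/api/v1/verify/order/apportion/s")
    ResultInfo saveApportion(@RequestBody OrderApportionDto record);
}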
- Step 11: because seata-spring-boot-starter is used, there is no need to add file.conf and registry.conf to each microservice, nor to configure the data source proxy manually (for comparison, a manual proxy sketch follows).
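Not needed for this setup, but for reference, without the starter the proxy would have to be declared by hand, roughly like this sketch (it assumes a Druid data source and standard spring.datasource properties; the class name is only an example):
import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

@Configuration
public class DataSourceProxyConfig {

    @Bean
    @ConfigurationProperties(prefix = "spring.datasource")
    public DruidDataSource druidDataSource() {
        return new DruidDataSource();
    }

    // wrap the real data source so Seata can record undo logs for branch transactions
    @Bean
    @Primary
    public DataSourceProxy dataSourceProxy(DruidDataSource druidDataSource) {
        return new DataSourceProxy(druidDataSource);
    }
}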
- Step 12: add @GlobalTransactional on the business initiator to open the global transaction; the other participating microservices do not need @GlobalTransactional
Global transaction initiator
@Log4j2
@Service
public class OrderServiceImpl extends BaseJpaMongoServiceImpl<OrderInfo, Long> implements OrderService {

    @Autowired
    private OrderRepository orderRepository;

    @Autowired
    private RestTemplate restTemplate;

    @Autowired
    private HttpServletRequest request;

    private final String PAY_SERVICE_HOST = "http://127.0.0.1:18088/api/v1/verify/order/apportion/s";

    public OrderServiceImpl(BaseJpaRepository<OrderInfo, Long> baseRepository) {
        super(baseRepository);
    }

    @Transactional
    @Override
    public ResultInfo saveRecord(OrderDto record) {
        OrderInfo order = new OrderInfo();
        order.setOrderNumber(String.valueOf(System.currentTimeMillis()));
        order.setOrderName("test order");
        order.setOrderClassify("1");
        order.setOrderStatus((byte) 0);
        order.setOrderRemarks("test order rollback");
        log.info("transaction xid: {}", RootContext.getXID());
        OrderInfo saveObj = this.orderRepository.save(order);
        if (saveObj != null && saveObj.getId() != null) {
            HttpHeaders headers = new HttpHeaders();
            Enumeration<String> headerNames = request.getHeaderNames();
            while (headerNames.hasMoreElements()) {
                String key = headerNames.nextElement();
                String value = request.getHeader(key);
                headers.add(key, value);
            }
            // call the other service
            ResultInfo result = restTemplate.postForObject(PAY_SERVICE_HOST, new HttpEntity<String>(headers), ResultInfo.class);
            log.info(result.getMessage());
            // With annotation-driven global transactions, a rollback requires the exception to be propagated back to
            // the transaction initiator so that its @GlobalTransactional annotation can sense it: either the provider
            // throws the exception directly, or it returns an error code and the consumer throws based on it.
            if (!result.getSuccess()) {
                log.info("reloading transaction {} for rollback", RootContext.getXID());
                try {
                    GlobalTransactionContext.reload(RootContext.getXID()).rollback();
                } catch (TransactionException e) {
                    e.printStackTrace();
                }
            }
        }
        //int i = 0/0;
        return ResultUtil.success();
    }

    /**
     * Open the global transaction with @GlobalTransactional on the initiator's method; Seata propagates the xid to
     * the other services through the interceptors, which is what makes the transaction distributed.
     * The initiator's method carries @GlobalTransactional; the methods of the remote participant services carry
     * @Transactional to open their local transactions.
     * The local transaction must be nested inside the global transaction (@GlobalTransactional).
     *
     * @param record
     * @return
     */
    @GlobalTransactional
    @Override
    public ResultInfo test(OrderDto record) {
        // saveRecord carries its own local transaction; it is called inside the global transaction
        return saveRecord(record);
    }
}
Transaction participants in the other microservices
@Log4j2
@Service
public class OrderApportionServiceImpl extends BaseJpaMongoServiceImpl<OrderApportion, Long> implements OrderApportionService {

    @Autowired
    private OrderApportionRepository orderApportionRepository;

    public OrderApportionServiceImpl(BaseJpaRepository<OrderApportion, Long> baseRepository) {
        super(baseRepository);
    }

    @Transactional
    @Override
    public ResultInfo saveRecord(OrderApportionDto record) {
        OrderApportion info = new OrderApportion();
        info.setOrderId(1L);
        info.setOrderNumber("10");
        info.setOrderName("test rollback");
        info.setOrderClassify("10");
        info.setStatus((byte) 0);
        info.setUserId(1L);
        info.setApportionNumber("110");
        info.setRemarks("test");
        log.info("transaction xid: {}", RootContext.getXID());
        OrderApportion saveObj = this.orderApportionRepository.save(info);
        // int i = 0/0;
        return ResultUtil.success();
    }
}
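The initiator above posts to http://127.0.0.1:18088/api/v1/verify/order/apportion/s, so the participant also needs a controller on that path; the original does not show it, but a minimal sketch could look like this (the class and mapping names are assumptions, and ResultInfo/OrderApportionService/OrderApportionDto are the project types from the samples above; the incoming xid header is bound by SeataHandlerInterceptor before this method runs):
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("/api/v1/verify/order/apportion")
public class OrderApportionController {

    @Autowired
    private OrderApportionService orderApportionService;

    // the xid sent by the initiator arrives as a request header and has already been bound
    // to RootContext by SeataHandlerInterceptor when this method executes
    @PostMapping("/s")
    public ResultInfo save() {
        return orderApportionService.saveRecord(new OrderApportionDto());
    }
}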
- Step 13: start each business microservice, then trigger a rollback and check whether it succeeds; pay special attention to the log.info("transaction xid: {}", RootContext.getXID()) output and verify that the xid is the same across the services.
If the rollback does not happen, the cause differs from setup to setup; analyze the problem with the help of the documentation below.
Reference documentation: https://seata.io/zh-cn/docs/dev/mode/at-mode.html
FAQ: https://seata.io/zh-cn/docs/overview/faq.html