Distributed Tasks
Typical use cases: generating daily/weekly/monthly log summaries, sending scheduled push notifications, producing data reports on a schedule, and so on.
I: Quartz Cluster
1: Overview
Quartz Cluster is Quartz's clustered mode, which relies on the persistence support Quartz provides.
Quartz offers two basic job store types:
RAMJobStore
By default, Quartz keeps all scheduling state in memory. This gives the best performance, because in-memory access is the fastest, but it provides no durability: if the application stops unexpectedly or the system crashes, all scheduling information is lost.
JobStoreTX <- this is the persistent store
All job information is saved to a database, with transaction support. If the application server is shut down or restarted, job information is not lost, and jobs whose execution failed because of the shutdown or restart can be recovered.
2: Code Example
- Create a new Spring Boot project and add the following dependencies
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-quartz</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-jpa</artifactId>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.github.pagehelper</groupId>
<artifactId>pagehelper</artifactId>
<version>5.0.0</version>
</dependency>
</dependencies>
- Write the configuration file (important)
spring:
  # datasource configuration
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: "jdbc:mysql://localhost:3306/quartz_jobs?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai"
    username: root
    password: 314159
  # quartz configuration
  quartz:
    properties:
      org:
        quartz:
          scheduler:
            instanceName: clusteredScheduler
            instanceId: AUTO
          jobStore:
            class: org.springframework.scheduling.quartz.LocalDataSourceJobStore # class to use on Spring Boot 2.5.6 and later
            driverDelegateClass: org.quartz.impl.jdbcjobstore.StdJDBCDelegate # use JDBC
            tablePrefix: QRTZ_
            isClustered: true
            clusterCheckinInterval: 10000
            useProperties: false
          threadPool:
            class: org.quartz.simpl.SimpleThreadPool
            threadCount: 10
            threadPriority: 5
            threadsInheritContextClassLoaderOfInitializingThread: true
    job-store-type: jdbc
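Spring Boot maps the spring.quartz.properties tree above onto the underlying Quartz scheduler. If you need settings the property tree does not cover, the auto-configured SchedulerFactoryBean can also be adjusted in code through a SchedulerFactoryBeanCustomizer. The class below is a minimal, optional sketch; the bean and the two settings shown are illustrative and not part of the original configuration.

package com.example.quartz_cluster_demo.config;

import org.springframework.boot.autoconfigure.quartz.SchedulerFactoryBeanCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class QuartzSchedulerConfig {

    /**
     * Optional: fine-tune the auto-configured SchedulerFactoryBean.
     * Both settings shown here also have spring.quartz.* property equivalents.
     */
    @Bean
    public SchedulerFactoryBeanCustomizer schedulerFactoryBeanCustomizer() {
        return factoryBean -> {
            // overwrite job definitions already stored in the database when the application starts
            factoryBean.setOverwriteExistingJobs(true);
            // wait for running jobs to finish before shutting the scheduler down
            factoryBean.setWaitForJobsToCompleteOnShutdown(true);
        };
    }
}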
- Create the database and the corresponding tables. First create a database named quartz_jobs, then run the following script:
CREATE TABLE QRTZ_JOB_DETAILS(
SCHED_NAME VARCHAR(120) NOT NULL,
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
DESCRIPTION VARCHAR(250) NULL,
JOB_CLASS_NAME VARCHAR(250) NOT NULL,
IS_DURABLE VARCHAR(1) NOT NULL,
IS_NONCONCURRENT VARCHAR(1) NOT NULL,
IS_UPDATE_DATA VARCHAR(1) NOT NULL,
REQUESTS_RECOVERY VARCHAR(1) NOT NULL,
JOB_DATA BLOB NULL,
PRIMARY KEY (SCHED_NAME,JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
JOB_NAME VARCHAR(200) NOT NULL,
JOB_GROUP VARCHAR(200) NOT NULL,
DESCRIPTION VARCHAR(250) NULL,
NEXT_FIRE_TIME BIGINT(13) NULL,
PREV_FIRE_TIME BIGINT(13) NULL,
PRIORITY INTEGER NULL,
TRIGGER_STATE VARCHAR(16) NOT NULL,
TRIGGER_TYPE VARCHAR(8) NOT NULL,
START_TIME BIGINT(13) NOT NULL,
END_TIME BIGINT(13) NULL,
CALENDAR_NAME VARCHAR(200) NULL,
MISFIRE_INSTR SMALLINT(2) NULL,
JOB_DATA BLOB NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,JOB_NAME,JOB_GROUP)
REFERENCES QRTZ_JOB_DETAILS(SCHED_NAME,JOB_NAME,JOB_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
REPEAT_COUNT BIGINT(7) NOT NULL,
REPEAT_INTERVAL BIGINT(12) NOT NULL,
TIMES_TRIGGERED BIGINT(10) NOT NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_CRON_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
CRON_EXPRESSION VARCHAR(120) NOT NULL,
TIME_ZONE_ID VARCHAR(80),
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_SIMPROP_TRIGGERS
(
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
STR_PROP_1 VARCHAR(512) NULL,
STR_PROP_2 VARCHAR(512) NULL,
STR_PROP_3 VARCHAR(512) NULL,
INT_PROP_1 INT NULL,
INT_PROP_2 INT NULL,
LONG_PROP_1 BIGINT NULL,
LONG_PROP_2 BIGINT NULL,
DEC_PROP_1 NUMERIC(13,4) NULL,
DEC_PROP_2 NUMERIC(13,4) NULL,
BOOL_PROP_1 VARCHAR(1) NULL,
BOOL_PROP_2 VARCHAR(1) NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_BLOB_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
BLOB_DATA BLOB NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP),
INDEX (SCHED_NAME,TRIGGER_NAME, TRIGGER_GROUP),
FOREIGN KEY (SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP)
REFERENCES QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_CALENDARS (
SCHED_NAME VARCHAR(120) NOT NULL,
CALENDAR_NAME VARCHAR(200) NOT NULL,
CALENDAR BLOB NOT NULL,
PRIMARY KEY (SCHED_NAME,CALENDAR_NAME))
ENGINE=InnoDB;
CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
SCHED_NAME VARCHAR(120) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
PRIMARY KEY (SCHED_NAME,TRIGGER_GROUP))
ENGINE=InnoDB;
CREATE TABLE QRTZ_FIRED_TRIGGERS (
SCHED_NAME VARCHAR(120) NOT NULL,
ENTRY_ID VARCHAR(95) NOT NULL,
TRIGGER_NAME VARCHAR(200) NOT NULL,
TRIGGER_GROUP VARCHAR(200) NOT NULL,
INSTANCE_NAME VARCHAR(200) NOT NULL,
FIRED_TIME BIGINT(13) NOT NULL,
SCHED_TIME BIGINT(13) NOT NULL,
PRIORITY INTEGER NOT NULL,
STATE VARCHAR(16) NOT NULL,
JOB_NAME VARCHAR(200) NULL,
JOB_GROUP VARCHAR(200) NULL,
IS_NONCONCURRENT VARCHAR(1) NULL,
REQUESTS_RECOVERY VARCHAR(1) NULL,
PRIMARY KEY (SCHED_NAME,ENTRY_ID))
ENGINE=InnoDB;
CREATE TABLE QRTZ_SCHEDULER_STATE (
SCHED_NAME VARCHAR(120) NOT NULL,
INSTANCE_NAME VARCHAR(200) NOT NULL,
LAST_CHECKIN_TIME BIGINT(13) NOT NULL,
CHECKIN_INTERVAL BIGINT(13) NOT NULL,
PRIMARY KEY (SCHED_NAME,INSTANCE_NAME))
ENGINE=InnoDB;
CREATE TABLE QRTZ_LOCKS (
SCHED_NAME VARCHAR(120) NOT NULL,
LOCK_NAME VARCHAR(40) NOT NULL,
PRIMARY KEY (SCHED_NAME,LOCK_NAME))
ENGINE=InnoDB;
CREATE TABLE QRTZ_TASK_HISTORY (
SCHED_NAME VARCHAR(120) NOT NULL,
INSTANCE_ID VARCHAR(200) NOT NULL,
FIRE_ID VARCHAR(95) NOT NULL,
TASK_NAME VARCHAR(200) NULL,
TASK_GROUP VARCHAR(200) NULL,
FIRED_TIME BIGINT(13) NULL,
FIRED_WAY VARCHAR(8) NULL,
COMPLETE_TIME BIGINT(13) NULL,
EXPEND_TIME BIGINT(13) NULL,
REFIRED INT NULL,
EXEC_STATE VARCHAR(10) NULL,
LOG TEXT NULL,
PRIMARY KEY (FIRE_ID)
)ENGINE=InnoDB;
CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_TK_S ON QRTZ_TASK_HISTORY(SCHED_NAME);
commit;
- entity class
package com.example.quartz_cluster_demo.entity;
import lombok.Data;
import java.util.Date;
/**
* @author pdai
*/
@Data
public class JobDetails {
private String cronExpression;
private String jobClassName;
private String triggerGroupName;
private String triggerName;
private String jobGroupName;
private String jobName;
private Date nextFireTime;
private Date previousFireTime;
private Date startTime;
private String timeZone;
private String status;
}
- job
package com.example.quartz_cluster_demo.job;
import lombok.extern.slf4j.Slf4j;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.scheduling.quartz.QuartzJobBean;
import java.util.Date;
@Slf4j
public class HelloJob extends QuartzJobBean {
@Override
protected void executeInternal(JobExecutionContext context) throws JobExecutionException {
// read the job parameters
context.getJobDetail().getJobDataMap().forEach(
(k, v) -> log.info("param, key:{}, value:{}", k, v)
);
// your business logic goes here
log.info("HelloJob executed at: {}", new Date());
}
}
package com.example.quartz_cluster_demo.job;
import lombok.extern.slf4j.Slf4j;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.scheduling.quartz.QuartzJobBean;
import java.time.LocalDateTime;
@Slf4j
public class NewJob extends QuartzJobBean {
@Override
protected void executeInternal(JobExecutionContext context) throws JobExecutionException {
// read the job parameters
context.getJobDetail().getJobDataMap().forEach(
(k, v) -> log.info("param, key:{}, value:{}", k, v)
);
// your business logic goes here
log.error("NewJob executed at: {}", LocalDateTime.now());
}
}
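Both jobs above iterate over the JobDataMap, but nothing in this demo actually puts parameters into it. As an illustration of how that could be done (the job name, group, and parameter keys here are made up, not part of the original project), parameters can be attached with usingJobData when the job is scheduled:

package com.example.quartz_cluster_demo.manager;

import com.example.quartz_cluster_demo.job.HelloJob;
import org.quartz.CronScheduleBuilder;
import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

/**
 * Sketch only: shows how parameters could be attached to a job so that the
 * JobDataMap loop in HelloJob/NewJob has something to print.
 */
@Component
public class ParamJobExample {

    @Autowired
    private Scheduler scheduler;

    public void scheduleHelloJobWithParams() throws SchedulerException {
        JobDetail jobDetail = JobBuilder.newJob(HelloJob.class)
                .withIdentity("helloJobWithParams", "demoGroup")
                .usingJobData("owner", "pdai")       // illustrative string parameter
                .usingJobData("reportType", "daily") // another illustrative parameter
                .build();

        Trigger trigger = TriggerBuilder.newTrigger()
                .withIdentity("helloJobWithParams", "demoGroup")
                .withSchedule(CronScheduleBuilder.cronSchedule("0/10 * * * * ?"))
                .build();

        scheduler.scheduleJob(jobDetail, trigger);
    }
}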
- manager (core)
package com.example.quartz_cluster_demo.manager;
import com.example.quartz_cluster_demo.entity.JobDetails;
import com.github.pagehelper.PageHelper;
import com.github.pagehelper.PageInfo;
import org.quartz.*;
import org.quartz.DateBuilder.IntervalUnit;
import org.quartz.impl.matchers.GroupMatcher;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.quartz.QuartzJobBean;
import org.springframework.stereotype.Component;
import java.util.*;
/**
* @author pdai
*/
@Component
public class QuartzManager {
@Autowired
private Scheduler sched;
/**
* Create or update a job: update it if it already exists, otherwise create it.
*
* @param jobClass     job class
* @param jobName      job name
* @param jobGroupName job group name
* @param jobCron      cron expression
*/
public void addOrUpdateJob(Class<? extends QuartzJobBean> jobClass, String jobName, String jobGroupName, String jobCron) {
try {
// look up the trigger
TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroupName);
CronTrigger trigger = (CronTrigger) sched.getTrigger(triggerKey);
// if the trigger does not exist, add the job; otherwise update it
if (trigger == null) {
addJob(jobClass, jobName, jobGroupName, jobCron);
} else {
// if the existing cron expression equals the new one, there is nothing to do; return
if (trigger.getCronExpression().equals(jobCron)) {
return;
}
updateJob(jobName, jobGroupName, jobCron);
}
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* Add a job.
*
* @param jobClass     job implementation class
* @param jobName      job name
* @param jobGroupName job group name
* @param jobCron      cron expression (e.g. 0/5 * * * * ?)
*/
public void addJob(Class<? extends QuartzJobBean> jobClass, String jobName, String jobGroupName, String jobCron) {
try {
JobDetail jobDetail = JobBuilder.newJob(jobClass).withIdentity(jobName, jobGroupName).build();
Trigger trigger = TriggerBuilder.newTrigger().withIdentity(jobName, jobGroupName)
.startAt(DateBuilder.futureDate(1, IntervalUnit.SECOND))
.withSchedule(CronScheduleBuilder.cronSchedule(jobCron)).startNow().build();
sched.scheduleJob(jobDetail, trigger);
if (!sched.isShutdown()) {
sched.start();
}
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* Add a job that repeats forever at a fixed interval.
*
* @param jobClass     job class
* @param jobName      job name
* @param jobGroupName job group name
* @param jobTime      interval in seconds
*/
public void addJob(Class<? extends Job> jobClass, String jobName, String jobGroupName, int jobTime) {
addJob(jobClass, jobName, jobGroupName, jobTime, -1);
}
/**
* Add a job that repeats at a fixed interval for a given number of times.
*
* @param jobClass     job class
* @param jobName      job name
* @param jobGroupName job group name
* @param jobTime      interval in seconds
* @param jobTimes     number of repetitions (negative means repeat forever)
*/
public void addJob(Class<? extends Job> jobClass, String jobName, String jobGroupName, int jobTime, int jobTimes) {
try {
// the job name and group form the job key
JobDetail jobDetail = JobBuilder
.newJob(jobClass)
.withIdentity(jobName, jobGroupName)
.build();
// use a SimpleTrigger schedule
Trigger trigger;
if (jobTimes < 0) {
trigger = TriggerBuilder
.newTrigger().withIdentity(jobName, jobGroupName)
.withSchedule(SimpleScheduleBuilder.repeatSecondlyForever(1).withIntervalInSeconds(jobTime))
.startNow()
.build();
} else {
trigger = TriggerBuilder
.newTrigger().withIdentity(jobName, jobGroupName)
.withSchedule(SimpleScheduleBuilder.repeatSecondlyForever(1).withIntervalInSeconds(jobTime).withRepeatCount(jobTimes))
.startNow()
.build();
}
// register with the scheduler and start it
sched.scheduleJob(jobDetail, trigger);
if (!sched.isShutdown()) {
sched.start();
}
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* Update a job's schedule.
*
* @param jobName      job name
* @param jobGroupName job group name
* @param jobTime      new cron expression
*/
public void updateJob(String jobName, String jobGroupName, String jobTime) {
try {
// look up the job's trigger
TriggerKey triggerKey = TriggerKey.triggerKey(jobName, jobGroupName);
CronTrigger trigger = (CronTrigger) sched.getTrigger(triggerKey);
// rebuild the trigger
trigger = trigger.getTriggerBuilder()
.withIdentity(triggerKey)
.withSchedule(CronScheduleBuilder.cronSchedule(jobTime)) // apply the new cron expression
.build();
// reschedule with the new trigger
sched.rescheduleJob(triggerKey, trigger);
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* Delete a job.
*
* @param jobName      job name
* @param jobGroupName job group name
*/
public void deleteJob(String jobName, String jobGroupName) {
try {
sched.pauseTrigger(TriggerKey.triggerKey(jobName, jobGroupName));
sched.unscheduleJob(TriggerKey.triggerKey(jobName, jobGroupName));
sched.deleteJob(new JobKey(jobName, jobGroupName));
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Pause a job.
*
* @param jobName      job name
* @param jobGroupName job group name
*/
public void pauseJob(String jobName, String jobGroupName) {
try {
JobKey jobKey = JobKey.jobKey(jobName, jobGroupName);
sched.pauseJob(jobKey);
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* Resume a job.
*
* @param jobName      job name
* @param jobGroupName job group name
*/
public void resumeJob(String jobName, String jobGroupName) {
try {
JobKey jobKey = JobKey.jobKey(jobName, jobGroupName);
// resume the job
sched.resumeJob(jobKey);
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* Trigger a job immediately.
*
* @param jobName      job name
* @param jobGroupName job group name
*/
public void runAJobNow(String jobName, String jobGroupName) {
try {
// build the job key from the job name and group
JobKey jobKey = JobKey.jobKey(jobName, jobGroupName);
// ask the scheduler to fire the job now
sched.triggerJob(jobKey);
} catch (SchedulerException e) {
e.printStackTrace();
}
}
/**
* List all jobs, paginated.
*
* @param pageNum  page number
* @param pageSize page size
* @return paginated job details
*/
public PageInfo<JobDetails> queryAllJobBean(int pageNum, int pageSize) {
// set up pagination
PageHelper.startPage(pageNum, pageSize);
List<JobDetails> jobList = null;
try {
// fetch all job keys
GroupMatcher<JobKey> matcher = GroupMatcher.anyJobGroup();
Set<JobKey> jobKeys = sched.getJobKeys(matcher);
jobList = new ArrayList<>();
for (JobKey jobKey : jobKeys) {
// fetch the triggers of the current job
List<? extends Trigger> triggers = sched.getTriggersOfJob(jobKey);
for (Trigger trigger : triggers) {
// build the JobDetails view object
JobDetails jobDetails = new JobDetails();
if (trigger instanceof CronTrigger) {
CronTrigger cronTrigger = (CronTrigger) trigger;
jobDetails.setCronExpression(cronTrigger.getCronExpression());
jobDetails.setTimeZone(cronTrigger.getTimeZone().getDisplayName());
}
jobDetails.setTriggerGroupName(trigger.getKey().getGroup());
jobDetails.setTriggerName(trigger.getKey().getName());
jobDetails.setJobGroupName(jobKey.getGroup());
jobDetails.setJobName(jobKey.getName());
jobDetails.setStartTime(trigger.getStartTime());
jobDetails.setJobClassName(sched.getJobDetail(jobKey).getJobClass().getName());
jobDetails.setNextFireTime(trigger.getNextFireTime());
jobDetails.setPreviousFireTime(trigger.getPreviousFireTime());
jobDetails.setStatus(sched.getTriggerState(trigger.getKey()).name());
// add to the result list
jobList.add(jobDetails);
}
}
} catch (SchedulerException e) {
e.printStackTrace();
}
return new PageInfo<>(jobList);
}
/**
* List all scheduled jobs.
*
* @return list of scheduled jobs
*/
public List<Map<String, Object>> queryAllJob() {
List<Map<String, Object>> jobList = null;
try {
// fetch all scheduled jobs
GroupMatcher<JobKey> matcher = GroupMatcher.anyJobGroup();
Set<JobKey> jobKeys = sched.getJobKeys(matcher);
jobList = new ArrayList<>();
for (JobKey jobKey : jobKeys) {
List<? extends Trigger> triggers = sched.getTriggersOfJob(jobKey);
for (Trigger trigger : triggers) {
jobList.add(createMap(jobKey, trigger));
}
}
} catch (SchedulerException e) {
e.printStackTrace();
}
return jobList;
}
/**
* List all currently running jobs (via getCurrentlyExecutingJobs).
*
* @return all currently running jobs
*/
public List<Map<String, Object>> queryRunJob() {
List<Map<String, Object>> jobList = null;
try {
// fetch the jobs currently being executed
List<JobExecutionContext> executingJobs = sched.getCurrentlyExecutingJobs();
jobList = new ArrayList<>(executingJobs.size());
for (JobExecutionContext executingJob : executingJobs) {
JobDetail jobDetail = executingJob.getJobDetail();
JobKey jobKey = jobDetail.getKey();
Trigger trigger = executingJob.getTrigger();
jobList.add(createMap(jobKey, trigger));
}
} catch (SchedulerException e) {
e.printStackTrace();
}
return jobList;
}
/**
* Build a map describing a job from its JobKey and Trigger.
*
* @param jobKey  job key
* @param trigger trigger
* @return map describing the job
* @throws SchedulerException scheduler exception
*/
public Map<String, Object> createMap(JobKey jobKey, Trigger trigger) throws SchedulerException {
Map<String, Object> map = new HashMap<>();
map.put("jobName", jobKey.getName());
map.put("jobGroupName", jobKey.getGroup());
map.put("description", "trigger:" + trigger.getKey());
Trigger.TriggerState triggerState = sched.getTriggerState(trigger.getKey());
map.put("jobStatus", triggerState.name());
if (trigger instanceof CronTrigger) {
CronTrigger cronTrigger = (CronTrigger) trigger;
String cronExpression = cronTrigger.getCronExpression();
map.put("jobTime", cronExpression);
}
return map;
}
}
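A minimal way to exercise QuartzManager, assuming the classes above, is to register a job at startup. The runner below is illustrative and not part of the original project:

package com.example.quartz_cluster_demo;

import com.example.quartz_cluster_demo.job.HelloJob;
import com.example.quartz_cluster_demo.manager.QuartzManager;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

/**
 * Sketch only: schedules HelloJob every 10 seconds when the application starts.
 * Job name, group, and cron expression are illustrative.
 */
@Component
public class JobBootstrapRunner implements CommandLineRunner {

    private final QuartzManager quartzManager;

    public JobBootstrapRunner(QuartzManager quartzManager) {
        this.quartzManager = quartzManager;
    }

    @Override
    public void run(String... args) {
        // creates the job if it does not exist, or updates its cron expression if it does
        quartzManager.addOrUpdateJob(HelloJob.class, "helloJob", "demoGroup", "0/10 * * * * ?");
    }
}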
- controller
package com.example.quartz_cluster_demo.controller;
import com.example.quartz_cluster_demo.entity.JobDetails;
import com.example.quartz_cluster_demo.manager.QuartzManager;
import com.github.pagehelper.PageInfo;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.quartz.QuartzJobBean;
import org.springframework.web.bind.annotation.*;
import java.util.HashMap;
import java.util.Map;
/**
* @author pdai
*/
@RestController
@RequestMapping(value = "/job")
public class JobController {
@Autowired
private QuartzManager qtzManager;
@SuppressWarnings("unchecked")
private static Class<? extends QuartzJobBean> getClass(String classname) throws Exception {
Class<?> class1 = Class.forName(classname);
return (Class<? extends QuartzJobBean>) class1;
}
/**
* @param jobClassName
* @param jobGroupName
* @param cronExpression
* @throws Exception
*/
@PostMapping(value = "/addjob")
public void addjob(@RequestParam(value = "jobClassName") String jobClassName,
@RequestParam(value = "jobGroupName") String jobGroupName,
@RequestParam(value = "cronExpression") String cronExpression) throws Exception {
qtzManager.addOrUpdateJob(getClass(jobClassName), jobClassName, jobGroupName, cronExpression);
}
/**
* @param jobClassName
* @param jobGroupName
* @throws Exception
*/
@PostMapping(value = "/pausejob")
public void pausejob(@RequestParam(value = "jobClassName") String jobClassName,
@RequestParam(value = "jobGroupName") String jobGroupName) throws Exception {
qtzManager.pauseJob(jobClassName, jobGroupName);
}
/**
* @param jobClassName
* @param jobGroupName
* @throws Exception
*/
@PostMapping(value = "/resumejob")
public void resumejob(@RequestParam(value = "jobClassName") String jobClassName,
@RequestParam(value = "jobGroupName") String jobGroupName) throws Exception {
qtzManager.resumeJob(jobClassName, jobGroupName);
}
/**
* @param jobClassName
* @param jobGroupName
* @param cronExpression
* @throws Exception
*/
@PostMapping(value = "/reschedulejob")
public void rescheduleJob(@RequestParam(value = "jobClassName") String jobClassName,
@RequestParam(value = "jobGroupName") String jobGroupName,
@RequestParam(value = "cronExpression") String cronExpression) throws Exception {
qtzManager.addOrUpdateJob(getClass(jobClassName), jobClassName, jobGroupName, cronExpression);
}
/**
* @param jobClassName
* @param jobGroupName
* @throws Exception
*/
@PostMapping(value = "/deletejob")
public void deletejob(@RequestParam(value = "jobClassName") String jobClassName,
@RequestParam(value = "jobGroupName") String jobGroupName) throws Exception {
qtzManager.deleteJob(jobClassName, jobGroupName);
}
/**
* @param pageNum
* @param pageSize
* @return
*/
@GetMapping(value = "/queryjob")
public Map<String, Object> queryjob(@RequestParam(value = "pageNum") Integer pageNum,
@RequestParam(value = "pageSize") Integer pageSize) {
PageInfo<JobDetails> jobAndTrigger = qtzManager.queryAllJobBean(pageNum, pageSize);
Map<String, Object> map = new HashMap<String, Object>();
map.put("JobAndTrigger", jobAndTrigger);
map.put("number", jobAndTrigger.getTotal());
return map;
}
}
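With the application running, a job can also be registered through the controller, for example (parameter values are illustrative): POST /job/addjob with jobClassName=com.example.quartz_cluster_demo.job.HelloJob, jobGroupName=demoGroup, and cronExpression=0/10 * * * * ?. Starting a second instance of the application against the same quartz_jobs database shows the clustering at work: because isClustered is true, each trigger firing is picked up by exactly one node.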
II: XXL-Job
III: Elastic-Job
1: Overview
Elastic-Job is a distributed task scheduling framework open-sourced by Dangdang. It solves the coordination problem for distributed jobs, ensuring that tasks are executed neither more than once nor missed.
Scheduled tasks are hard to avoid in real projects. They are usually deployed on a single machine, because with multiple machines the same task would run multiple times. For example, a daily job that computes user earnings would, with several instances, compute each user's earnings several times (a business error). But a single machine offers no availability guarantee: if it goes down, there is no failover.
Elastic-Job is a Java distributed scheduling solution open-sourced by Dangdang in 2015, built on ZooKeeper and Quartz. It consists of two independent sub-projects: Elastic-Job-Lite and Elastic-Job-Cloud.
ElasticJob became a subproject of Apache ShardingSphere on May 28, 2020.
With ElasticJob, developers no longer need to worry about non-functional requirements such as scaling a job's throughput linearly, and can focus on business-oriented coding and design.
It also relieves operations engineers: job availability and the related management concerns are handled automatically, and capacity can be scaled simply by adding service nodes.
ElasticJob-Lite: a lightweight, decentralized solution that provides coordination for distributed jobs in the form of a jar.
ElasticJob-Cloud: a solution based on a custom Mesos Framework that additionally provides resource governance, application distribution, and process isolation.
Differences between ElasticJob-Lite and ElasticJob-Cloud:
| | ElasticJob-Lite | ElasticJob-Cloud |
| --- | --- | --- |
| Decentralized | Yes | No |
| Resource allocation | Not supported | Supported |
| Job mode | Resident | Resident + transient |
| Deployment dependencies | ZooKeeper | ZooKeeper + Mesos |
ElasticJob is an in-process, thread-level scheduling framework. It works together with Java frameworks such as Spring and Dubbo, so beans injected by Spring (data source pools, Dubbo remote services, and so on) can be used freely inside jobs, which keeps it close to everyday business development.
Elastic scheduling is ElasticJob's most important feature and the origin of the product's name: it is a task processing system that scales tasks horizontally through sharding.
2: Sharding Items
To execute a task in a distributed fashion, it is split into multiple independent sharding items, and the distributed servers each execute one or more of these shards.
The sharding-item concept allows a job to run in a distributed environment, with each job server running only the shards assigned to it. As servers are added or go down, ElasticJob detects the change in near real time and redistributes the sharding items so the assignment stays reasonable, which means job throughput grows as resources are added.
For example, if a job is split into 4 shards and executed on two servers, each server receives 2 shards and carries 50% of the workload.
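In code, the sharding item is what lets each node process only its own slice of the data. A hedged sketch using the Dangdang 2.1.5 API (the table, column, and SQL here are purely illustrative):

package com.example.elasticjob.sketch;

import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.simple.SimpleJob;

/**
 * Sketch only: each node handles the rows whose id maps to its own sharding item,
 * e.g. WHERE MOD(id, shardingTotalCount) = shardingItem.
 */
public class EarningsJob implements SimpleJob {

    @Override
    public void execute(ShardingContext context) {
        int totalCount = context.getShardingTotalCount(); // e.g. 4 shards in total
        int item = context.getShardingItem();             // 0..totalCount-1 on this node

        // Illustrative query: this node only computes earnings for its own slice of users.
        String sql = "SELECT id FROM t_user WHERE MOD(id, " + totalCount + ") = " + item;
        System.out.println("shard " + item + "/" + totalCount + " would run: " + sql);
    }
}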
3: Notes on the Dependencies
If you use the original Dangdang artifacts:
- For a plain Java project, use the elastic-job-lite-core core dependency
<dependency>
<groupId>com.dangdang</groupId>
<artifactId>elastic-job-lite-core</artifactId>
<version>2.1.5</version>
</dependency>
- For integration with the Spring framework, use the elastic-job-lite-spring dependency, which already includes the elastic-job-lite-core core dependency
<dependency>
<groupId>com.dangdang</groupId>
<artifactId>elastic-job-lite-spring</artifactId>
<version>2.1.5</version>
</dependency>
ElasticJob became a subproject of Apache ShardingSphere on May 28, 2020. Dangdang released versions up to 2.1.5; after the donation to Apache the package name changed and releases continue from version 3.0.0:
- For a plain Java project, use the elasticjob-lite-core core dependency (note that after joining Apache the hyphen was dropped: elastic-job became elasticjob)
<dependency>
<groupId>org.apache.shardingsphere.elasticjob</groupId>
<artifactId>elasticjob-lite-core</artifactId>
<version>3.0.4</version>
</dependency>
- For integration with the Spring framework, use the elasticjob-lite-spring-core dependency, which already includes the elasticjob-lite-core core dependency:
<dependency>
<groupId>org.apache.shardingsphere.elasticjob</groupId>
<artifactId>elasticjob-lite-spring-core</artifactId>
<version>3.0.4</version>
</dependency>
- For integration with Spring Boot
<dependency>
<groupId>org.apache.shardingsphere.elasticjob</groupId>
<artifactId>elasticjob-lite-spring-boot-starter</artifactId>
<version>3.0.4</version>
</dependency>
4: Module Layout
elastic-job
├──elastic-job-lite lite parent module, not intended for direct use
├ ├──elastic-job-lite-core plain-Java support module, can be used directly
├ ├──elastic-job-lite-spring Spring namespace support module, can be used directly
├ ├──elastic-job-lite-lifecycle lite job operations module, not for direct use
├ ├──elastic-job-lite-console lite web console module, can be used directly
├──elastic-job-example usage examples
├ ├──elastic-job-example-embed-zk embedded ZooKeeper module used by the examples
├ ├──elastic-job-example-jobs example jobs
├ ├──elastic-job-example-lite-java Java-based usage example
├ ├──elastic-job-example-lite-spring Spring-based usage example
├ ├──elastic-job-example-lite-springboot Spring Boot-based usage example
├──elastic-job-doc project that generates the documentation from markdown, users need not care about it
├ ├──elastic-job-lite-doc lite documentation
elastic-job-lite-core:
- The core module of ElasticJob-Lite, providing ElasticJob's core functionality.
- Contains the basic features of distributed task scheduling, such as registering, starting, pausing, resuming, and stopping jobs.
- Supports several job types, such as Simple Job, Dataflow Job, and Script Job.
- Provides the interfaces and implementations for sharding strategies, job executors, and so on.
elastic-job-lite-spring:
- The Spring integration module of ElasticJob-Lite, used to integrate with the Spring framework.
- Provides seamless Spring integration: ElasticJob jobs can be defined and managed through Spring configuration files or annotations.
- Supports configuring and managing job information, triggers, sharding strategies, and more in the Spring way.
elastic-job-lite-lifecycle:
- The lifecycle module of ElasticJob-Lite, used to manage the lifecycle of jobs.
- Provides extension points for running additional operations when a job starts or shuts down.
- By implementing the corresponding interfaces, custom logic such as initializing or releasing resources can be executed at job startup and shutdown.
elastic-job-lite-console:
- The console module of ElasticJob-Lite, which provides a visual interface for managing ElasticJob.
- Can be used directly to run a web UI for managing and monitoring ElasticJob jobs.
- Provides job configuration, job status monitoring, job execution logs, and other features.
5: Usage Example
Download ZooKeeper, copy zoo_sample.cfg to zoo.cfg, and change dataDir to a directory of your choice.
Here we integrate with the Spring framework and use the Dangdang artifacts, so we bring in the elastic-job-lite-spring dependency.
<dependency>
<groupId>com.dangdang</groupId>
<artifactId>elastic-job-lite-spring</artifactId>
<version>2.1.5</version>
</dependency>
Job implementation -> implement the SimpleJob interface. The interface exposes a single method, which is invoked on schedule; override it with your own business logic.
package com.cui.uid.job;
import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.simple.SimpleJob;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;
import java.util.Date;
/**
* <p>
* Description:
* </p>
*
* @author cui haida
* @date 2025/04/20/18:34
*/
@Component
@Qualifier("myjob")
public class Myjob implements SimpleJob {
@Override
public void execute(ShardingContext context) {
switch (context.getShardingItem()) {
case 0:
System.out.println("执行分片0,dateTime:" + new Date(System.currentTimeMillis()));
break;
case 1:
System.out.println("执行分片1,dateTime:" + new Date(System.currentTimeMillis()));
break;
}
}
}
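Besides SimpleJob, the core module also offers a dataflow job type (mentioned in the module overview above). A hedged sketch against the Dangdang 2.1.5 API, with a made-up data source: a dataflow job fetches a batch of data for its shard and then processes it.

package com.cui.uid.job;

import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.dataflow.DataflowJob;

import java.util.Collections;
import java.util.List;

/**
 * Sketch only: fetchData returns the batch belonging to this shard and
 * processData consumes it. The data source here is illustrative.
 */
public class MyDataflowJob implements DataflowJob<String> {

    @Override
    public List<String> fetchData(ShardingContext context) {
        // fetch unprocessed records belonging to this shard
        System.out.println("fetching data for shard " + context.getShardingItem());
        return Collections.emptyList(); // illustrative: nothing to process
    }

    @Override
    public void processData(ShardingContext context, List<String> data) {
        // process the batch fetched above
        data.forEach(item -> System.out.println("processing " + item));
    }
}

A dataflow job is wired up with DataflowJobConfiguration instead of SimpleJobConfiguration; the registry center and Lite root configuration are the same as in the configuration class below.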
Configuration class: specify the registry center, the job configuration, and so on.
package com.cui.uid.configuration;
import com.cui.uid.job.Myjob;
import com.dangdang.ddframe.job.config.JobCoreConfiguration;
import com.dangdang.ddframe.job.config.simple.SimpleJobConfiguration;
import com.dangdang.ddframe.job.lite.api.JobScheduler;
import com.dangdang.ddframe.job.lite.config.LiteJobConfiguration;
import com.dangdang.ddframe.job.reg.base.CoordinatorRegistryCenter;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperConfiguration;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperRegistryCenter;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* <p>
* Description:
* </p>
*
* @author cui haida
* @date 2025/04/20/18:39
*/
@Configuration
public class JobConfig {
@Autowired
@Qualifier("myjob")
private Myjob myjob;
@Bean(initMethod = "init")
public JobScheduler jobDemo() {
System.out.println("jobDemo");
// registry center + job configuration; the JobScheduler is started by its init method
return new JobScheduler(
createRegistryCenter(), createJobConfiguration()
);
}
private CoordinatorRegistryCenter createRegistryCenter() {
// ZooKeeper server list: "localhost:2181"
// ZooKeeper namespace: "elastic-job-demo-zoo"
CoordinatorRegistryCenter regCenter = new ZookeeperRegistryCenter(new ZookeeperConfiguration("localhost:2181"
, "elastic-job-demo-zoo"));
regCenter.init();
return regCenter;
}
private LiteJobConfiguration createJobConfiguration() {
// define the job core configuration
// job name: "demoSimpleJob"; cron expression controlling when the job fires: "0/15 * * * * ?"; total number of sharding items: 2
JobCoreConfiguration simpleCoreConfig =
JobCoreConfiguration.newBuilder("demoSimpleJob", "0/15 * * * * ?", 2).build();
// define the SIMPLE job type configuration
SimpleJobConfiguration simpleJobConfig =
new SimpleJobConfiguration(simpleCoreConfig, myjob.getClass().getCanonicalName());
// define the Lite job root configuration
return LiteJobConfiguration.newBuilder(simpleJobConfig).build();
}
}
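Two settings worth knowing about in the 2.1.5 API, shown as a hedged, drop-in variant of the createJobConfiguration() method above (the parameter values are illustrative): shardingItemParameters attaches a label to each shard, readable in the job via context.getShardingParameter(), and overwrite(true) lets the local configuration overwrite what is already registered in ZooKeeper.

// Variant of createJobConfiguration() for the JobConfig class above; values are illustrative.
private LiteJobConfiguration createJobConfiguration() {
    JobCoreConfiguration coreConfig = JobCoreConfiguration
            .newBuilder("demoSimpleJob", "0/15 * * * * ?", 2)
            // attach a label to each sharding item: shard 0 -> "beijing", shard 1 -> "shanghai"
            .shardingItemParameters("0=beijing,1=shanghai")
            .build();
    SimpleJobConfiguration simpleJobConfig =
            new SimpleJobConfiguration(coreConfig, myjob.getClass().getCanonicalName());
    return LiteJobConfiguration.newBuilder(simpleJobConfig)
            // overwrite the configuration already stored in ZooKeeper with this local one
            .overwrite(true)
            .build();
}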
Then simply start the Spring Boot application.
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class DemoApplication {
public static void main(String[] args) {
SpringApplication.run(DemoApplication.class, args);
}
}