canal获取mysql增量数据经过kerberos认证到kafka
步骤:
1.mysql需要开启binlog日志
# 修改mysql配置文件
vim /etc/my.cnf
-----------------------
[mysqld]
#开启binlog
log-bin=mysql-bin
#日志记录方式,建议使用ROW模式
binlog-format=ROW
#给当前mysql一个server id,之后的CDC工具里配置的不能跟这个重复
server_id=1
开启binlog之后重启mysql服务:
#重启mysql服务
service mysqld restart
#查看mysql服务状态
service mysqld status
进入mysql命令行,使用下面的命令查看binlog开启的情况:
mysql> show variables like 'binlog_format';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| binlog_format | ROW |
+---------------+-------+
1 row in set (0.01 sec)
mysql> show variables like 'log_bin';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| log_bin | ON |
+---------------+-------+
1 row in set (0.00 sec)
由于canal伪装成MySQL的从节点,因此需要在被监控的MySQL上给canal创建一个拥有复制权限的用户:
#创建用户canal,密码canal_pwd
CREATE USER canal IDENTIFIED BY 'canal_pwd';
#如果创建用户时报错,密码不满足复杂度,可以使用下面的命令修改密码复杂度条件
#查看密码策略
SHOW VARIABLES LIKE 'validate_password%';
#设置密码策略为LOW
set global validate_password_policy=LOW;
#设置密码最短位数
set global validate_password_length=4;
#授予canal用户复制binlog的权限
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
-- GRANT ALL PRIVILEGES ON *.* TO 'canal'@'%' ;
#刷新权限
FLUSH PRIVILEGES;
#查看canal用户的权限
show grants for 'canal';
2.配置canal conf路径下的文件
a.修改instance.properties
canal.instance.master.address=cdh01:3306
# username/password
canal.instance.dbUsername=canal
# 此处填写上一步创建canal用户时设置的密码,需与CREATE USER语句中的密码一致
canal.instance.dbPassword=canal_pwd
# 指定topic(properties文件不支持行内注释,注释需单独成行,否则会被当作value的一部分)
canal.mq.topic=test_0825
b.修改canal.properties
# tcp, kafka, RocketMQ
canal.serverMode = kafka
canal.zkServers =cdh01:2181
canal.mq.servers = cdh01:9092,cdh02:9092,cdh03:9092
canal.mq.kafka.kerberos.enable =true
canal.mq.kafka.kerberos.krb5FilePath = ../conf/kerberos/krb5.conf
canal.mq.kafka.kerberos.jaasFilePath = ../conf/kerberos/jaas.conf
c.修改startup.sh
JAVA_OPTS=" $JAVA_OPTS -Djava.security.auth.login.config=/opt/canal/canal_server/conf/kerberos/jaas.conf"
JAVA_OPTS=" $JAVA_OPTS -Djava.security.krb5.conf=/opt/canal/canal_server/conf/kerberos/krb5.conf"
d.在conf目录下创建kerberos目录
以下两个文件在为kafka配置kerberos认证时已经生成,直接复制过来即可
vi jaas.conf
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/opt/software/kerberos/kafka.keytab"
principal="kafka@HADOOP.COM";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=false
useTicketCache=true
renewTicket=true;
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/opt/software/kerberos/kafka.keytab"
storeKey=true
useTicketCache=false
principal="kafka@HADOOP.COM";
};
vi krb5.conf
# Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
pkinit_anchors = FILE:/etc/pki/tls/certs/ca-bundle.crt
default_realm = HADOOP.COM
#default_ccache_name = KEYRING:persistent:%{uid}
[realms]
HADOOP.COM = {
kdc = cdh01
admin_server = cdh01
}
[domain_realm]
.hadoop.com = HADOOP.COM
hadoop.com = HADOOP.COM
配置完成之后启动canal 进行验证即可