TiDB Cluster Disaster Recovery Based on Multiple Replicas

Reference: https://www.bookstack.cn/read/tidb-8.1-zh/4c922925a950e576.md

Database version

  • TiDB 8.5.0

Overall cluster architecture

IP address       OS version  Notes     Region
192.168.222.151  RedHat 8.4  TiDB 8.5  1
192.168.222.152  RedHat 8.4  TiDB 8.5  1
192.168.222.153  RedHat 8.4  TiDB 8.5  1
192.168.222.154  RedHat 8.4  TiDB 8.5  2
192.168.222.155  RedHat 8.4  TiDB 8.5  3
192.168.222.156  RedHat 8.4  TiDB 8.5  2
192.168.222.157  RedHat 8.4  TiDB 8.5  2

Cluster architecture overview

The cluster spans three regions: region 1 has 3 nodes, region 2 has 3 nodes, and region 3 has 1 node.
Region 1 serves as the primary region, region 2 as the secondary region, and region 3 as a third region used only for voting.
TiKV keeps 7 replicas of each Region, and PD runs 7 members. With 7 replicas the Raft quorum is 4, so the cluster can tolerate the loss of any single region (at most 3 replicas) without losing quorum.

Set up passwordless SSH login

# Cluster deployment, scale-out, scale-in, etc. are all executed from host 192.168.222.151

# Generate a key pair
ssh-keygen -t rsa -b 4096

# Distribute the public key to all target nodes
for i in {1..7}; do echo $i; ssh-copy-id root@192.168.222.15$i; done
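
To confirm that passwordless login works, a quick check like the following can be run from 192.168.222.151 (a minimal sketch; BatchMode makes ssh fail instead of prompting if a key is missing):

# Each command should print the node's hostname without asking for a password
for i in {1..7}; do ssh -o BatchMode=yes root@192.168.222.15$i hostname; done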

Install TiUP

# Install TiUP:
curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh

# Set environment variables:
source /root/.bash_profile

# Update TiUP:
tiup update --self
tiup update cluster
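
As an optional sanity check, the installed TiUP version and the clusters it manages can be listed:

# Print the TiUP version; the cluster list stays empty until a cluster is deployed
tiup --version
tiup cluster list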

Adjust the configuration for the production environment and deploy

Deploy the TiDB cluster

  • Initialize the configuration file:
tiup cluster template > topology.yaml

# Create a new configuration file
[root@localhost tidb]# cat tidb-cluster.yaml 
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/data/tidb/tidbdeploy"
  data_dir: "/data/tidb/tidbdata"
pd_servers:
  - host: 192.168.222.151
  - host: 192.168.222.152
  - host: 192.168.222.153
  - host: 192.168.222.154
  - host: 192.168.222.155
  - host: 192.168.222.156
  - host: 192.168.222.157
tidb_servers:
  - host: 192.168.222.151
  - host: 192.168.222.154
tikv_servers:
  - host: 192.168.222.151
  - host: 192.168.222.152
  - host: 192.168.222.153
  - host: 192.168.222.154
  - host: 192.168.222.155
  - host: 192.168.222.156
  - host: 192.168.222.157
monitoring_servers:
  - host: 192.168.222.153
grafana_servers:
  - host: 192.168.222.153
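
Before deploying, the topology file and target hosts can be validated with TiUP's check subcommand; --apply asks TiUP to try fixing the problems it reports:

# Check all target hosts against the topology file
tiup cluster check ./tidb-cluster.yaml --user root
# Optionally let TiUP auto-fix the reported issues
# tiup cluster check ./tidb-cluster.yaml --apply --user root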

Deploy the cluster:

# List available TiDB versions
[root@localhost tidb]# tiup list tidb
# Deploy
tiup cluster deploy tidb-cluster v8.5.0 tidb-cluster.yaml -u root

# To destroy the cluster (if needed):
# tiup cluster destroy tidb-cluster

Start the cluster:

tiup cluster start tidb-cluster --init
# tiup cluster start tidb-cluster
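
Note: --init performs the first secure start and generates a random password for the TiDB root user, printed once in the command output; the plain start form (commented out above) is used for subsequent starts.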

Check the cluster status:

[root@localhost tidb]# sh -x status-tidb.sh 
+ /root/.tiup/bin/tiup cluster display tidb-cluster
Cluster type:       tidb
Cluster name:       tidb-cluster
Cluster version:    v8.5.0
Deploy user:        tidb
SSH type:           builtin
Dashboard URL:      http://192.168.222.152:2379/dashboard
Grafana URL:        http://192.168.222.153:3000
ID                     Role        Host             Ports        OS/Arch       Status  Data Dir                             Deploy Dir
--                     ----        ----             -----        -------       ------  --------                             ----------
192.168.222.153:3000   grafana     192.168.222.153  3000         linux/x86_64  Up      -                                    /data/tidb/tidbdeploy/grafana-3000
192.168.222.151:2379   pd          192.168.222.151  2379/2380    linux/x86_64  Up|L    /data/tidb/tidbdata/pd-2379          /data/tidb/tidbdeploy/pd-2379
192.168.222.152:2379   pd          192.168.222.152  2379/2380    linux/x86_64  Up|UI   /data/tidb/tidbdata/pd-2379          /data/tidb/tidbdeploy/pd-2379
192.168.222.153:2379   pd          192.168.222.153  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379          /data/tidb/tidbdeploy/pd-2379
192.168.222.154:2379   pd          192.168.222.154  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379          /data/tidb/tidbdeploy/pd-2379
192.168.222.155:2379   pd          192.168.222.155  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379          /data/tidb/tidbdeploy/pd-2379
192.168.222.156:2379   pd          192.168.222.156  2379/2380    linux/x86_64  Up      /data/tidb/tidbdata/pd-2379          /data/tidb/tidbdeploy/pd-2379
192.168.222.153:9090   prometheus  192.168.222.153  9090/12020   linux/x86_64  Up      /data/tidb/tidbdata/prometheus-9090  /data/tidb/tidbdeploy/prometheus-9090
192.168.222.151:4000   tidb        192.168.222.151  4000/10080   linux/x86_64  Up      -                                    /data/tidb/tidbdeploy/tidb-4000
192.168.222.154:4000   tidb        192.168.222.154  4000/10080   linux/x86_64  Up      -                                    /data/tidb/tidbdeploy/tidb-4000
192.168.222.151:20160  tikv        192.168.222.151  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160       /data/tidb/tidbdeploy/tikv-20160
192.168.222.152:20160  tikv        192.168.222.152  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160       /data/tidb/tidbdeploy/tikv-20160
192.168.222.153:20160  tikv        192.168.222.153  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160       /data/tidb/tidbdeploy/tikv-20160
192.168.222.154:20160  tikv        192.168.222.154  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160       /data/tidb/tidbdeploy/tikv-20160
192.168.222.155:20160  tikv        192.168.222.155  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160       /data/tidb/tidbdeploy/tikv-20160
192.168.222.156:20160  tikv        192.168.222.156  20160/20180  linux/x86_64  Up      /data/tidb/tidbdata/tikv-20160       /data/tidb/tidbdeploy/tikv-20160
Total nodes: 16
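
In the Status column, Up|L marks the current PD leader and Up|UI marks the PD instance serving the TiDB Dashboard.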


Change the root password

/home/data/mysql-8.0.30-el7-x86_64/bin/mysql -h 192.168.222.151 -P 4000 -u root -p'^uD_VmT3J6714@d-H2'
ALTER USER 'root'@'%' IDENTIFIED BY '123456';
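
To verify that the new password took effect, a quick login can be attempted (same client path as above; 123456 is the password just set):

# Log in with the new password and print the server version
/home/data/mysql-8.0.30-el7-x86_64/bin/mysql -h 192.168.222.151 -P 4000 -u root -p'123456' -e 'SELECT VERSION();'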

Write some test data into TiDB

tiup bench tpcc  prepare -H 192.168.222.151 -P 4000 -p 123456 -D tpcc --warehouses 1
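
After loading, a short TPC-C workload can be run against the same database to generate some traffic (a sketch; the 1-minute duration is just an example):

# Run the TPC-C workload for one minute against the data loaded above
tiup bench tpcc run -H 192.168.222.151 -P 4000 -p 123456 -D tpcc --warehouses 1 --time 1m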

View leader information

At this point no location labels have been configured yet, so the label column below shows null.

mysql> SELECT STORE_ID, address, leader_count, label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS ORDER BY store_id;
+----------+-----------------------+--------------+-------+
| STORE_ID | address               | leader_count | label |
+----------+-----------------------+--------------+-------+
|        1 | 192.168.222.153:20160 |            3 | null  |
|        4 | 192.168.222.151:20160 |            0 | null  |
|        5 | 192.168.222.152:20160 |           29 | null  |
|     1001 | 192.168.222.155:20160 |           21 | null  |
|     1002 | 192.168.222.154:20160 |           20 | null  |
|    12001 | 192.168.222.156:20160 |           20 | null  |
+----------+-----------------------+--------------+-------+

Write a new cluster topology file

[root@localhost tidb]# cat tidb-cluster.yaml 
global:
  user: "root"
  ssh_port: 22
  deploy_dir: "/data/tidb/tidbdeploy"
  data_dir: "/data/tidb/tidbdata"
server_configs:
  tikv:
    server.grpc-compression-type: gzip # gzip enables message compression between TiKV nodes, reducing network traffic.
  pd:
    replication.location-labels: ["Region","AZ"] # PD schedules replicas according to the Region and AZ labels configured on the TiKV nodes.
pd_servers:
  - host: 192.168.222.151
    name: "pd-192.168.222.151-2379"
  - host: 192.168.222.152
    name: "pd-192.168.222.152-2379"
  - host: 192.168.222.153
    name: "pd-192.168.222.153-2379"
  - host: 192.168.222.154
    name: "pd-192.168.222.154-2379"
  - host: 192.168.222.155
    name: "pd-192.168.222.155-2379"
  - host: 192.168.222.156
    name: "pd-192.168.222.156-2379"
  - host: 192.168.222.157
    name: "pd-192.168.222.157-2379"
tidb_servers:
  - host: 192.168.222.151
  - host: 192.168.222.154
tikv_servers:  # Mark the Region and AZ of each TiKV node via the labels option
  - host: 192.168.222.151
    config:
      server.labels: { Region: "Region1", AZ: "AZ1" }
  - host: 192.168.222.152
    config:
      server.labels: { Region: "Region1", AZ: "AZ2" }
  - host: 192.168.222.153
    config:
      server.labels: { Region: "Region1", AZ: "AZ3" }
  - host: 192.168.222.154
    config:
      server.labels: { Region: "Region2", AZ: "AZ4" }
  - host: 192.168.222.156
    config:
      server.labels: { Region: "Region2", AZ: "AZ6" }
  - host: 192.168.222.157
    config:
      server.labels: { Region: "Region2", AZ: "AZ7" }
  - host: 192.168.222.155
    config:
      server.labels: { Region: "Region3", AZ: "AZ5" }
      # Lengthen region 3's election timeout so its replica is not elected as a Raft leader.
      # (Assuming the default raft-base-tick-interval of 1s, 50-60 ticks is far above the default 10 ticks used by the other nodes.)
      raftstore.raft-min-election-timeout-ticks: 50
      raftstore.raft-max-election-timeout-ticks: 60
monitoring_servers:
  - host: 192.168.222.153
grafana_servers:
  - host: 192.168.222.153
alertmanager_servers:
  - host: 192.168.222.153

Update the cluster configuration according to the file above, then reload the cluster

tiup cluster edit-config tidb-cluster
tiup cluster reload tidb-cluster -y
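
One way to confirm the reload took effect is to query PD for its replication settings and store labels (standard pd-ctl subcommands):

# Show max-replicas and location-labels as seen by PD
tiup ctl:v8.5.0 pd config show replication
# Show each store together with its labels
tiup ctl:v8.5.0 pd store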

View leader information

After the reload, each store carries its Region and AZ labels, and leaders are spread across all seven stores.

mysql> SELECT STORE_ID, address, leader_count, label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS ORDER BY store_id;
+----------+-----------------------+--------------+------------------------------------------------------------------------+
| STORE_ID | address               | leader_count | label                                                                  |
+----------+-----------------------+--------------+------------------------------------------------------------------------+
|        1 | 192.168.222.153:20160 |           12 | [{"key": "AZ", "value": "AZ3"}, {"key": "Region", "value": "Region1"}] |
|        4 | 192.168.222.151:20160 |            7 | [{"key": "AZ", "value": "AZ1"}, {"key": "Region", "value": "Region1"}] |
|        5 | 192.168.222.152:20160 |           15 | [{"key": "AZ", "value": "AZ2"}, {"key": "Region", "value": "Region1"}] |
|     1001 | 192.168.222.155:20160 |           13 | [{"key": "AZ", "value": "AZ5"}, {"key": "Region", "value": "Region3"}] |
|     1002 | 192.168.222.154:20160 |           15 | [{"key": "AZ", "value": "AZ4"}, {"key": "Region", "value": "Region2"}] |
|    12001 | 192.168.222.156:20160 |           14 | [{"key": "AZ", "value": "AZ6"}, {"key": "Region", "value": "Region2"}] |
|    33028 | 192.168.222.157:20160 |            6 | [{"key": "AZ", "value": "AZ6"}, {"key": "Region", "value": "Region2"}] |
+----------+-----------------------+--------------+------------------------------------------------------------------------+

Configure the replica count and leader restrictions for the cluster

max-replicas sets the number of replicas per Region to 7; label-property reject-leader keeps Raft leaders off the stores labeled Region=Region3, the voting-only region.

tiup ctl:v8.5.0 pd config set max-replicas 7
tiup ctl:v8.5.0 pd config set label-property reject-leader Region Region3

# The following step loads some test data into the cluster (optional)
tiup bench tpcc  prepare -H 192.168.222.151 -P 4000 -p 123456 -D tpcc --warehouses 1

Specify PD leader priorities

# Nodes with a higher priority value are more likely to become the PD leader
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.151-2379 6
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.152-2379 5
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.153-2379 4
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.154-2379 3
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.156-2379 2
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.157-2379 1
tiup ctl:v8.5.0 pd member leader_priority  pd-192.168.222.155-2379 0

# Get member information
tiup ctl:v8.5.0 pd member
# Check PD instance health
tiup ctl:v8.5.0 pd health
# Show PD store information
tiup ctl:v8.5.0 pd store

Create placement rules and pin the test tables' leader replicas to region 1

-- Create two placement policies: the first makes region 1 the primary region and is used during normal operation;
-- the second makes region 2 the primary region, to be applied when region 1 fails.
CREATE PLACEMENT POLICY primary_rule_for_region1 PRIMARY_REGION="Region1" REGIONS="Region1, Region2,Region3";
CREATE PLACEMENT POLICY secondary_rule_for_region2 PRIMARY_REGION="Region2" REGIONS="Region1,Region2,Region3";
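
The policies just created can be inspected through standard TiDB statements:

-- List all placement policies and their definitions
SELECT * FROM INFORMATION_SCHEMA.PLACEMENT_POLICIES;
SHOW CREATE PLACEMENT POLICY primary_rule_for_region1;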

-- Apply the newly created policy primary_rule_for_region1 to the corresponding user tables.
ALTER TABLE tpcc.warehouse PLACEMENT POLICY=primary_rule_for_region1;
ALTER TABLE tpcc.district PLACEMENT POLICY=primary_rule_for_region1;
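
If region 1 becomes unavailable, the same tables can be switched to the secondary policy created above, which makes region 2 the primary region for their leaders:

-- Fail the test tables over to region 2
ALTER TABLE tpcc.warehouse PLACEMENT POLICY=secondary_rule_for_region2;
ALTER TABLE tpcc.district PLACEMENT POLICY=secondary_rule_for_region2;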

-- Note: adjust the database name, table names, and placement policy names above as needed.
-- A query like the one below shows the number of leaders on each store, to confirm whether leader migration has completed.
mysql> SELECT STORE_ID, address, leader_count, label FROM INFORMATION_SCHEMA.TIKV_STORE_STATUS ORDER BY store_id;
+----------+-----------------------+--------------+------------------------------------------------------------------------+
| STORE_ID | address               | leader_count | label                                                                  |
+----------+-----------------------+--------------+------------------------------------------------------------------------+
|        1 | 192.168.222.153:20160 |           12 | [{"key": "AZ", "value": "AZ3"}, {"key": "Region", "value": "Region1"}] |
|        4 | 192.168.222.151:20160 |            7 | [{"key": "AZ", "value": "AZ1"}, {"key": "Region", "value": "Region1"}] |
|        5 | 192.168.222.152:20160 |           15 | [{"key": "AZ", "value": "AZ2"}, {"key": "Region", "value": "Region1"}] |
|     1001 | 192.168.222.155:20160 |           13 | [{"key": "AZ", "value": "AZ5"}, {"key": "Region", "value": "Region3"}] |
|     1002 | 192.168.222.154:20160 |           15 | [{"key": "AZ", "value": "AZ4"}, {"key": "Region", "value": "Region2"}] |
|    12001 | 192.168.222.156:20160 |           14 | [{"key": "AZ", "value": "AZ6"}, {"key": "Region", "value": "Region2"}] |
...