Building a Highly Available MongoDB 6.0 Sharded Cluster with docker-compose
This guide uses MongoDB version 6.0.
We will build a sharded cluster with the following components:
A config server replica set with 3 members.
Two shards, each a 3-member replica set.
Three routers (mongos).
Each replica set needs one member per server. Rather than dedicating three servers to each role, all roles are co-located: each of the three servers runs one config server member, one member of each shard, and one mongos.
Server layout and ports
IP | ROLE | PORT
---|---|---
192.168.1.145 | configsvr | 10001
192.168.1.145 | shard1 | 20001
192.168.1.145 | shard2 | 20002
192.168.1.145 | mongos | 27017
192.168.1.146 | configsvr | 10001
192.168.1.146 | shard1 | 20001
192.168.1.146 | shard2 | 20002
192.168.1.146 | mongos | 27017
192.168.1.147 | configsvr | 10001
192.168.1.147 | shard1 | 20001
192.168.1.147 | shard2 | 20002
192.168.1.147 | mongos | 27017
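Applications never talk to the shards directly; they connect through any of the three mongos routers. Assuming the addresses above, a typical client connection string would look like this (any subset of the mongos nodes works):
mongodb://192.168.1.145:27017,192.168.1.146:27017,192.168.1.147:27017/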
Create directories
# Run on all three servers
mkdir -p /mongo/{common,data,log}
mkdir -p /mongo/data/{config,mongo,shard1,shard2}
mkdir -p /mongo/log/{shard1,shard2}
chmod -R 0777 /mongo/log
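The compose file below mounts ./common/key.file into the mongos container (and has it commented out for the mongod services); it is only needed if you later enable internal authentication. If you plan to use it, a minimal sketch for creating it (generate once, then copy the identical file to the other two servers):
# Generate a shared keyfile for internal authentication
openssl rand -base64 756 > /mongo/common/key.file
chmod 400 /mongo/common/key.file
chown 999:999 /mongo/common/key.file  # 999 is the mongodb uid/gid in the official image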
Write the docker-compose file
# The compose file is identical on all three servers
[root@node1 mongo]# cat /mongo/docker-compose.yml
version: '3'
services:
  configsvr1:
    container_name: config
    image: mongo:6.0.11
    command: mongod --configsvr --replSet config_rs --dbpath /data/db --port 27017 --bind_ip_all
    ports:
      - 10001:27017
    volumes:
      - ./data/config:/data/db
      #- ./common/key.file:/home/key.file
  shard1:
    container_name: shard1
    image: mongo:6.0.11
    command: mongod --shardsvr --replSet shard1_rs --dbpath /data/db --port 27017 --bind_ip_all --logpath /log/shard/mongo.log --wiredTigerCacheSizeGB 1
    ports:
      - 20001:27017
    volumes:
      - ./data/shard1:/data/db
      #- ./common/key.file:/home/key.file
      - ./log/shard1:/log/shard
  shard2:
    container_name: shard2
    image: mongo:6.0.11
    command: mongod --shardsvr --replSet shard2_rs --dbpath /data/db --port 27017 --bind_ip_all --logpath /log/shard/mongo.log --wiredTigerCacheSizeGB 1
    ports:
      - 20002:27017
    volumes:
      - ./data/shard2:/data/db
      #- ./common/key.file:/home/key.file
      - ./log/shard2:/log/shard
  mongos:
    container_name: mongos
    image: mongo:6.0.11
    command: mongos --configdb config_rs/192.168.1.145:10001,192.168.1.146:10001,192.168.1.147:10001 --port 27017 --bind_ip_all
    ports:
      - 27017:27017
    volumes:
      - ./common/key.file:/home/key.file
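Before starting anything, you can sanity-check the YAML on each server; docker-compose validates the file and prints nothing on success:
cd /mongo
docker-compose config --quiet && echo "compose file OK"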
Start the containers
# Run on all three servers
cd /mongo
# Start the containers
docker-compose up -d
# Check the containers
[root@node1 mongo]# docker-compose ps -a
NAME     IMAGE          COMMAND                  SERVICE      CREATED             STATUS             PORTS
config   mongo:6.0.11   "docker-entrypoint.s…"   configsvr1   About an hour ago   Up About an hour   0.0.0.0:10001->27017/tcp, :::10001->27017/tcp
mongos   mongo:6.0.11   "docker-entrypoint.s…"   mongos       About an hour ago   Up About an hour   0.0.0.0:27017->27017/tcp, :::27017->27017/tcp
shard1   mongo:6.0.11   "docker-entrypoint.s…"   shard1       About an hour ago   Up About an hour   0.0.0.0:20001->27017/tcp, :::20001->27017/tcp
shard2   mongo:6.0.11   "docker-entrypoint.s…"   shard2       About an hour ago   Up About an hour   0.0.0.0:20002->27017/tcp, :::20002->27017/tcp
[root@node1 mongo]#
Shard configuration
In this section we create two shards: first one shard, into which we import data, then a second shard to verify that the data is rebalanced.
Create the first shard and initialize its replica set
# Run these steps on 192.168.1.145
docker exec -it shard1 mongosh   # opens the mongosh CLI
rs.initiate(
... {
... _id: "shard1_rs",
... members: [
... { _id : 0, host : "192.168.1.145:20001" },
... { _id : 1, host : "192.168.1.146:20001" },
... { _id : 2, host : "192.168.1.147:20001" }
... ]
... }
... );
{ ok: 1 }
# Check the replica set status
shard1_rs [direct: other] test> rs.status();
{
set: 'shard1_rs',
date: ISODate("2024-08-11T08:19:39.865Z"),
myState: 1,
term: Long("1"),
syncSourceHost: '',
syncSourceId: -1,
heartbeatIntervalMillis: Long("2000"),
majorityVoteCount: 2,
writeMajorityCount: 2,
votingMembersCount: 3,
writableVotingMembersCount: 3,
optimes: {
lastCommittedOpTime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
lastCommittedWallTime: ISODate("2024-08-11T08:19:30.810Z"),
readConcernMajorityOpTime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
appliedOpTime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
durableOpTime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
lastAppliedWallTime: ISODate("2024-08-11T08:19:30.810Z"),
lastDurableWallTime: ISODate("2024-08-11T08:19:30.810Z")
},
lastStableRecoveryTimestamp: Timestamp({ t: 1723364329, i: 1 }),
electionCandidateMetrics: {
lastElectionReason: 'electionTimeout',
lastElectionDate: ISODate("2024-08-11T08:19:00.778Z"),
electionTerm: Long("1"),
lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1723364329, i: 1 }), t: Long("-1") },
lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1723364329, i: 1 }), t: Long("-1") },
numVotesNeeded: 2,
priorityAtElection: 1,
electionTimeoutMillis: Long("10000"),
numCatchUpOps: Long("0"),
newTermStartDate: ISODate("2024-08-11T08:19:00.797Z"),
wMajorityWriteAvailabilityDate: ISODate("2024-08-11T08:19:01.548Z")
},
members: [
{
_id: 0,
name: '192.168.1.145:20001',
health: 1,
state: 1,
stateStr: 'PRIMARY',
uptime: 453,
optime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
optimeDate: ISODate("2024-08-11T08:19:30.000Z"),
lastAppliedWallTime: ISODate("2024-08-11T08:19:30.810Z"),
lastDurableWallTime: ISODate("2024-08-11T08:19:30.810Z"),
syncSourceHost: '',
syncSourceId: -1,
infoMessage: '',
electionTime: Timestamp({ t: 1723364340, i: 1 }),
electionDate: ISODate("2024-08-11T08:19:00.000Z"),
configVersion: 1,
configTerm: 1,
self: true,
lastHeartbeatMessage: ''
},
{
_id: 1,
name: '192.168.1.146:20001',
health: 1,
state: 2,
stateStr: 'SECONDARY',
uptime: 49,
optime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
optimeDurable: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
optimeDate: ISODate("2024-08-11T08:19:30.000Z"),
optimeDurableDate: ISODate("2024-08-11T08:19:30.000Z"),
lastAppliedWallTime: ISODate("2024-08-11T08:19:30.810Z"),
lastDurableWallTime: ISODate("2024-08-11T08:19:30.810Z"),
lastHeartbeat: ISODate("2024-08-11T08:19:38.805Z"),
lastHeartbeatRecv: ISODate("2024-08-11T08:19:39.812Z"),
pingMs: Long("0"),
lastHeartbeatMessage: '',
syncSourceHost: '192.168.1.145:20001',
syncSourceId: 0,
infoMessage: '',
configVersion: 1,
configTerm: 1
},
{
_id: 2,
name: '192.168.1.147:20001',
health: 1,
state: 2,
stateStr: 'SECONDARY',
uptime: 49,
optime: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
optimeDurable: { ts: Timestamp({ t: 1723364370, i: 1 }), t: Long("1") },
optimeDate: ISODate("2024-08-11T08:19:30.000Z"),
optimeDurableDate: ISODate("2024-08-11T08:19:30.000Z"),
lastAppliedWallTime: ISODate("2024-08-11T08:19:30.810Z"),
lastDurableWallTime: ISODate("2024-08-11T08:19:30.810Z"),
lastHeartbeat: ISODate("2024-08-11T08:19:38.805Z"),
lastHeartbeatRecv: ISODate("2024-08-11T08:19:39.812Z"),
pingMs: Long("0"),
lastHeartbeatMessage: '',
syncSourceHost: '192.168.1.145:20001',
syncSourceId: 0,
infoMessage: '',
configVersion: 1,
configTerm: 1
}
],
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1723364370, i: 1 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1723364370, i: 1 })
}
# Check which node is the primary
shard1_rs [direct: primary] test> db.isMaster();
{
topologyVersion: {
processId: ObjectId("66b87256c5f141da61558f12"),
counter: Long("7")
},
hosts: [
'192.168.1.145:20001',
'192.168.1.146:20001',
'192.168.1.147:20001'
],
setName: 'shard1_rs',
setVersion: 1,
ismaster: true, ### 192.168.1.145 is the primary
secondary: false,
primary: '192.168.1.145:20001',
me: '192.168.1.145:20001',
electionId: ObjectId("7fffffff0000000000000001"),
lastWrite: {
opTime: { ts: Timestamp({ t: 1723364410, i: 1 }), t: Long("1") },
lastWriteDate: ISODate("2024-08-11T08:20:10.000Z"),
majorityOpTime: { ts: Timestamp({ t: 1723364410, i: 1 }), t: Long("1") },
majorityWriteDate: ISODate("2024-08-11T08:20:10.000Z")
},
isImplicitDefaultMajorityWC: true,
maxBsonObjectSize: 16777216,
maxMessageSizeBytes: 48000000,
maxWriteBatchSize: 100000,
localTime: ISODate("2024-08-11T08:20:17.527Z"),
logicalSessionTimeoutMinutes: 30,
connectionId: 2,
minWireVersion: 0,
maxWireVersion: 17,
readOnly: false,
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1723364410, i: 1 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1723364410, i: 1 }),
isWritablePrimary: true
}
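Note that db.isMaster() is deprecated: on MongoDB 5.0+ (6.0 included) the supported equivalent is db.hello(), which returns the same topology document with isWritablePrimary in place of ismaster:
db.hello()   // modern replacement for db.isMaster(); check the isWritablePrimary field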
Configure the config server
Enter the config-server container on the first server (192.168.1.145); this only needs to be run on one node:
docker exec -it config mongosh --port 27017
rs.initiate(
{
_id: "config_rs",
configsvr: true,
members: [
{ _id : 0, host : "192.168.1.145:10001" },
{ _id : 1, host : "192.168.1.146:10001" },
{ _id : 2, host : "192.168.1.147:10001" }
]
}
);
Test the config server
docker exec -it config mongosh --port 27017 --eval "db.runCommand({ ping: 1 })" | grep ok
ok: 1,
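You can also confirm that the config replica set has elected a primary, for example:
docker exec -it config mongosh --port 27017 --eval "rs.status().members.map(m => m.name + ' ' + m.stateStr)"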
Configure mongos
#192.168.1.145
docker exec -it mongos mongosh
[direct: mongos] test> sh.addShard("shard1_rs/192.168.1.145:20001,192.168.1.146:20001,192.168.1.147:20001")
{
shardAdded: 'shard1_rs',
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1723365952, i: 7 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1723365952, i: 7 })
}
## Check the mongos status
[direct: mongos] test> sh.status()
shardingVersion
{ _id: 1, clusterId: ObjectId("66b8766df5f2eabc84fc6b34") }
---
shards
[
{
_id: 'shard1_rs',
host: 'shard1_rs/192.168.1.145:20001,192.168.1.146:20001,192.168.1.147:20001',
state: 1,
topologyTime: Timestamp({ t: 1723365952, i: 4 })
}
]
---
active mongoses
[ { '6.0.11': 2 } ]
---
autosplit
{ 'Currently enabled': 'yes' }
---
balancer
{
'Currently enabled': 'yes',
'Failed balancer rounds in last 5 attempts': 0,
'Currently running': 'no',
'Migration Results for the last 24 hours': 'No recent migrations'
}
---
databases
[
{
database: { _id: 'config', primary: 'config', partitioned: true },
collections: {}
}
]
Functional testing
Create a sharded collection
1. Connect to mongos and create the sharded collection
docker exec -it mongos mongosh
mongos> sh.status()
# A database must have sharding enabled before its collections can be sharded
[direct: mongos] admin> use graph;
switched to db graph
[direct: mongos] graph> sh.enableSharding("graph") ## enable sharding for the database
{
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1723366451, i: 4 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1723366451, i: 2 })
}
## Shard the collection on a hashed _id key
[direct: mongos] graph> sh.shardCollection("graph.emp",{_id: 'hashed'})
{
collectionsharded: 'graph.emp',
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1723366617, i: 29 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1723366617, i: 25 })
}
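With a hashed shard key you can optionally pre-split the collection into more initial chunks, so data spreads out immediately once additional shards join. A sketch of the alternative call (numInitialChunks is only valid for hashed shard keys and must be passed when the collection is first sharded):
sh.shardCollection("graph.emp", { _id: "hashed" }, false, { numInitialChunks: 8 })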
### Check the status again
sh.status()
shardingVersion
{ _id: 1, clusterId: ObjectId("66b8766df5f2eabc84fc6b34") }
---
shards
[
{
_id: 'shard1_rs',
host: 'shard1_rs/192.168.1.145:20001,192.168.1.146:20001,192.168.1.147:20001',
state: 1,
topologyTime: Timestamp({ t: 1723365952, i: 4 })
}
]
---
active mongoses
[ { '6.0.11': 2 } ]
---
autosplit
{ 'Currently enabled': 'yes' }
---
balancer
{
'Currently enabled': 'yes',
'Currently running': 'no',
'Failed balancer rounds in last 5 attempts': 0,
'Migration Results for the last 24 hours': 'No recent migrations'
}
---
databases
[
{
database: { _id: 'config', primary: 'config', partitioned: true },
collections: {
'config.system.sessions': {
shardKey: { _id: 1 },
unique: false,
balancing: true,
chunkMetadata: [ { shard: 'shard1_rs', nChunks: 1024 } ],
chunks: [
'too many chunks to print, use verbose if you want to force print'
],
tags: []
}
}
},
{
database: {
_id: 'graph',
primary: 'shard1_rs',
partitioned: false,
version: {
uuid: new UUID("d8e56cbd-86e1-47a1-b6e7-af18747e7af0"),
timestamp: Timestamp({ t: 1723366450, i: 1 }),
lastMod: 1
}
},
collections: {
'graph.emp': {
shardKey: { _id: 'hashed' }, ## confirms the shard key on _id is hashed
unique: false,
balancing: true,
chunkMetadata: [ { shard: 'shard1_rs', nChunks: 2 } ],
chunks: [
{ min: { _id: MinKey() }, max: { _id: Long("0") }, 'on shard': 'shard1_rs', 'last modified': Timestamp({ t: 1, i: 0 }) },
{ min: { _id: Long("0") }, max: { _id: MaxKey() }, 'on shard': 'shard1_rs', 'last modified': Timestamp({ t: 1, i: 1 }) }
],
tags: []
}
}
}
]
Data test
docker exec -it mongos mongosh
## Insert test data
use graph
for (var i = 0; i <= 10000; i++) {
  db.emp.insertOne({ i: i });   // insert() is deprecated in mongosh
}
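Inserting one document per call makes 10001 round trips; batching the writes with insertMany is much faster and gives the same result (a sketch):
// Build the documents in memory, then send them in one batch
const docs = [];
for (let i = 0; i <= 10000; i++) { docs.push({ i: i }); }
db.emp.insertMany(docs);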
## Check the data and its distribution
[direct: mongos] graph> db.emp.getShardDistribution()
Shard shard1_rs at shard1_rs/192.168.1.145:20001,192.168.1.146:20001,192.168.1.147:20001
{
data: '283KiB',
docs: 10001,
chunks: 2,
'estimated data per chunk': '141KiB',
'estimated docs per chunk': 5000
}
---
Totals
{
data: '283KiB',
docs: 10001,
chunks: 2,
'Shard shard1_rs': [
'100 % data',
'100 % docs in cluster',
'29B avg obj size on shard'
]
}
### Count the documents
[direct: mongos] graph> db.emp.find().count()
10001
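find().count() still works but is deprecated in mongosh; the supported form is:
db.emp.countDocuments()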
Create the second shard and verify data balancing
Build the replica set
### Initialize the shard2 replica set
docker exec -it shard2 mongosh
test> rs.initiate(
... {
... _id: "shard2_rs",
... members: [
... { _id : 0, host : "192.168.1.145:20002" },
... { _id : 1, host : "192.168.1.146:20002" },
... { _id : 2, host : "192.168.1.147:20002" }
... ]
... }
... );
{ ok: 1 }
## Check the status
shard2_rs [direct: other] test> rs.status()
{
set: 'shard2_rs',
date: ISODate("2024-08-11T09:13:59.622Z"),
myState: 1,
term: Long("1"),
syncSourceHost: '',
syncSourceId: -1,
heartbeatIntervalMillis: Long("2000"),
majorityVoteCount: 2,
writeMajorityCount: 2,
votingMembersCount: 3,
writableVotingMembersCount: 3,
optimes: {
lastCommittedOpTime: { ts: Timestamp({ t: 1723367639, i: 6 }), t: Long("1") },
lastCommittedWallTime: ISODate("2024-08-11T09:13:59.441Z"),
readConcernMajorityOpTime: { ts: Timestamp({ t: 1723367639, i: 6 }), t: Long("1") },
appliedOpTime: { ts: Timestamp({ t: 1723367639, i: 6 }), t: Long("1") },
durableOpTime: { ts: Timestamp({ t: 1723367639, i: 6 }), t: Long("1") },
lastAppliedWallTime: ISODate("2024-08-11T09:13:59.441Z"),
lastDurableWallTime: ISODate("2024-08-11T09:13:59.441Z")
},
lastStableRecoveryTimestamp: Timestamp({ t: 1723367626, i: 1 }),
electionCandidateMetrics: {
lastElectionReason: 'electionTimeout',
lastElectionDate: ISODate("2024-08-11T09:13:58.158Z"),
electionTerm: Long("1"),
lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 1723367626, i: 1 }), t: Long("-1") },
lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1723367626, i: 1 }), t: Long("-1") },
numVotesNeeded: 2,
priorityAtElection: 1,
electionTimeoutMillis: Long("10000"),
numCatchUpOps: Long("0"),
newTermStartDate: ISODate("2024-08-11T09:13:58.176Z"),
wMajorityWriteAvailabilityDate: ISODate("2024-08-11T09:13:59.405Z")
},
members: [
{
_id: 0,
name: '192.168.1.145:20002',
health: 1,
state: 1,
stateStr: 'PRIMARY',
uptime: 3713,
optime: { ts: Timestamp({ t: 1723367639, i: 6 }), t: Long("1") },
optimeDate: ISODate("2024-08-11T09:13:59.000Z"),
lastAppliedWallTime: ISODate("2024-08-11T09:13:59.441Z"),
lastDurableWallTime: ISODate("2024-08-11T09:13:59.441Z"),
syncSourceHost: '',
syncSourceId: -1,
infoMessage: '',
electionTime: Timestamp({ t: 1723367638, i: 1 }),
electionDate: ISODate("2024-08-11T09:13:58.000Z"),
configVersion: 1,
configTerm: 1,
self: true,
lastHeartbeatMessage: ''
},
{
_id: 1,
name: '192.168.1.146:20002',
health: 1,
state: 2,
stateStr: 'SECONDARY',
uptime: 12,
optime: { ts: Timestamp({ t: 1723367626, i: 1 }), t: Long("-1") },
optimeDurable: { ts: Timestamp({ t: 1723367626, i: 1 }), t: Long("-1") },
optimeDate: ISODate("2024-08-11T09:13:46.000Z"),
optimeDurableDate: ISODate("2024-08-11T09:13:46.000Z"),
lastAppliedWallTime: ISODate("2024-08-11T09:13:59.441Z"),
lastDurableWallTime: ISODate("2024-08-11T09:13:59.441Z"),
lastHeartbeat: ISODate("2024-08-11T09:13:58.165Z"),
lastHeartbeatRecv: ISODate("2024-08-11T09:13:59.169Z"),
pingMs: Long("0"),
lastHeartbeatMessage: '',
syncSourceHost: '',
syncSourceId: -1,
infoMessage: '',
configVersion: 1,
configTerm: 0
},
{
_id: 2,
name: '192.168.1.147:20002',
health: 1,
state: 2,
stateStr: 'SECONDARY',
uptime: 12,
optime: { ts: Timestamp({ t: 1723367626, i: 1 }), t: Long("-1") },
optimeDurable: { ts: Timestamp({ t: 1723367626, i: 1 }), t: Long("-1") },
optimeDate: ISODate("2024-08-11T09:13:46.000Z"),
optimeDurableDate: ISODate("2024-08-11T09:13:46.000Z"),
lastAppliedWallTime: ISODate("2024-08-11T09:13:59.441Z"),
lastDurableWallTime: ISODate("2024-08-11T09:13:59.441Z"),
lastHeartbeat: ISODate("2024-08-11T09:13:58.165Z"),
lastHeartbeatRecv: ISODate("2024-08-11T09:13:59.169Z"),
pingMs: Long("0"),
lastHeartbeatMessage: '',
syncSourceHost: '',
syncSourceId: -1,
infoMessage: '',
configVersion: 1,
configTerm: 0
}
],
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1723367639, i: 6 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1723367639, i: 6 })
}
Add the shard to mongos
docker exec -it mongos mongosh
sh.addShard("shard2_rs/192.168.1.145:20002,192.168.1.146:20002,192.168.1.147:20002")
Create users
docker exec -it mongos mongosh --port 27017
use admin;
db.createUser({ user: "root", pwd: "boya1234", roles: [ { role: "clusterAdmin", db: "admin" }, { role: "clusterManager", db: "admin" }, { role: "clusterMonitor", db: "admin" } ] });
db.auth('root', 'boya1234');
db.createUser({ user: "rootTestSub", pwd: "boya1234", roles: [ { role: "dbAdminAnyDatabase", db: "admin" }, { role: "readWriteAnyDatabase", db: "admin" } ] });
## The output looks like this
{
ok: 1,
'$clusterTime': {
clusterTime: Timestamp({ t: 1719207433, i: 4 }),
signature: {
hash: Binary.createFromBase64("AAAAAAAAAAAAAAAAAAAAAAAAAAA=", 0),
keyId: Long("0")
}
},
operationTime: Timestamp({ t: 1719207433, i: 4 })
}
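Note that the key.file mounted above is not yet used, so the users just created are not enforced: every process still runs without authentication. A minimal sketch of turning it on (uncomment the key.file volumes in the compose file, then extend the commands and recreate the containers on all three servers; --keyFile implicitly enables authentication on mongod):
# mongod services (configsvr1, shard1, shard2): append to command:
#   --keyFile /home/key.file
# mongos service: append to command:
#   --keyFile /home/key.file
cd /mongo && docker-compose up -d --force-recreate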