1. 使用mongodb 版本
mongodb-linux-x86_64-2.2.3-rc1
2. 环境配置
202.173.88.57
202.173.88.59
202.173.88.69
shard1 端口 27017
shard2 端口 27018
shard3 端口 27019
config 端口 20000
mongos 端口 40000
在三台物理机上分别部署三个shard(每个shard一主二备),并将每个shard配置为一个replica set
3. 配置步骤
3.1 创建目录结构
在202.173.88.57服务器创建目录
mkdir -p /home/mongodb/workspace/data/shard11
mkdir -p /home/mongodb/workspace/data/shard12
mkdir -p /home/mongodb/workspace/data/shard13
mkdir -p /home/mongodb/workspace/data/config
mkdir -p /home/mongodb/workspace/data/log
在202.173.88.59服务器创建目录
mkdir -p /home/mongodb/workspace/data/shard21
mkdir -p /home/mongodb/workspace/data/shard22
mkdir -p /home/mongodb/workspace/data/shard23
mkdir -p /home/mongodb/workspace/data/config
mkdir -p /home/mongodb/workspace/data/log
在202.173.88.69服务器创建目录
mkdir -p /home/mongodb/workspace/data/shard31
mkdir -p /home/mongodb/workspace/data/shard32
mkdir -p /home/mongodb/workspace/data/shard33
mkdir -p /home/mongodb/workspace/data/config
mkdir -p /home/mongodb/workspace/data/log
3.2 在57,59,69上执行配置replica-set 名称为shard1
./mongod --shardsvr --replSet shard1 --port 27017 --dbpath /home/mongodb/workspace/data/shard11 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard11.log --logappend --fork
./mongod --shardsvr --replSet shard1 --port 27017 --dbpath /home/mongodb/workspace/data/shard21 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard21.log --logappend --fork
./mongod --shardsvr --replSet shard1 --port 27017 --dbpath /home/mongodb/workspace/data/shard31 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard31.log --logappend --fork
在57上执行./mongo 202.173.88.57:27017/admin
config = {_id: 'shard1', members: [{_id: 0, host: '202.173.88.57:27017'},{_id: 1, host: '202.173.88.59:27017'},{_id: 2, host: '202.173.88.69:27017'}]}
rs.initiate(config);
检查shard1结果
shard1:PRIMARY> rs.status();
{
"set" : "shard1",
"date" : ISODate("2013-04-17T03:42:48Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "202.173.88.57:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 229,
"optime" : Timestamp(1366170134000, 1),
"optimeDate" : ISODate("2013-04-17T03:42:14Z"),
"self" : true
},
{
"_id" : 1,
"name" : "202.173.88.59:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 29,
"optime" : Timestamp(1366170134000, 1),
"optimeDate" : ISODate("2013-04-17T03:42:14Z"),
"lastHeartbeat" : ISODate("2013-04-17T03:42:47Z"),
"pingMs" : 0
},
{
"_id" : 2,
"name" : "202.173.88.69:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 29,
"optime" : Timestamp(1366170134000, 1),
"optimeDate" : ISODate("2013-04-17T03:42:14Z"),
"lastHeartbeat" : ISODate("2013-04-17T03:42:47Z"),
"pingMs" : 0
}
],
"ok" : 1
}
3.3 在57,59,69上执行配置replica-set 名称为shard2
./mongod --shardsvr --replSet shard2 --port 27018 --dbpath /home/mongodb/workspace/data/shard12 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard12.log --logappend --fork
./mongod --shardsvr --replSet shard2 --port 27018 --dbpath /home/mongodb/workspace/data/shard22 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard22.log --logappend --fork
./mongod --shardsvr --replSet shard2 --port 27018 --dbpath /home/mongodb/workspace/data/shard32 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard32.log --logappend --fork
在57上执行./mongo 202.173.88.57:27018/admin
config = {_id: 'shard2', members: [{_id: 0, host: '202.173.88.57:27018'},{_id: 1, host: '202.173.88.59:27018'},{_id: 2, host: '202.173.88.69:27018'}]}
rs.initiate(config);
检查shard2结果
shard2:PRIMARY> rs.status()
{
"set" : "shard2",
"date" : ISODate("2013-04-17T03:47:04Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "202.173.88.57:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 200,
"optime" : Timestamp(1366170393000, 1),
"optimeDate" : ISODate("2013-04-17T03:46:33Z"),
"self" : true
},
{
"_id" : 1,
"name" : "202.173.88.59:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 25,
"optime" : Timestamp(1366170393000, 1),
"optimeDate" : ISODate("2013-04-17T03:46:33Z"),
"lastHeartbeat" : ISODate("2013-04-17T03:47:03Z"),
"pingMs" : 0
},
{
"_id" : 2,
"name" : "202.173.88.69:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 25,
"optime" : Timestamp(1366170393000, 1),
"optimeDate" : ISODate("2013-04-17T03:46:33Z"),
"lastHeartbeat" : ISODate("2013-04-17T03:47:03Z"),
"pingMs" : 0
}
],
"ok" : 1
}
3.4 在57,59,69上执行配置replica-set 名称为shard3
./mongod --shardsvr --replSet shard3 --port 27019 --dbpath /home/mongodb/workspace/data/shard13 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard13.log --logappend --fork
./mongod --shardsvr --replSet shard3 --port 27019 --dbpath /home/mongodb/workspace/data/shard23 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard23.log --logappend --fork
./mongod --shardsvr --replSet shard3 --port 27019 --dbpath /home/mongodb/workspace/data/shard33 --oplogSize 100 --logpath /home/mongodb/workspace/data/log/shard33.log --logappend --fork
在57上执行./mongo 202.173.88.57:27019/admin
config = {_id: 'shard3', members: [{_id: 0, host: '202.173.88.57:27019'},{_id: 1, host: '202.173.88.59:27019'},{_id: 2, host: '202.173.88.69:27019'}]}
rs.initiate(config);
检查shard3结果
shard3:PRIMARY> rs.status()
{
"set" : "shard3",
"date" : ISODate("2013-04-17T03:50:50Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "202.173.88.57:27019",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 176,
"optime" : Timestamp(1366170617000, 1),
"optimeDate" : ISODate("2013-04-17T03:50:17Z"),
"self" : true
},
{
"_id" : 1,
"name" : "202.173.88.59:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 33,
"optime" : Timestamp(1366170617000, 1),
"optimeDate" : ISODate("2013-04-17T03:50:17Z"),
"lastHeartbeat" : ISODate("2013-04-17T03:50:49Z"),
"pingMs" : 0
},
{
"_id" : 2,
"name" : "202.173.88.69:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 33,
"optime" : Timestamp(1366170617000, 1),
"optimeDate" : ISODate("2013-04-17T03:50:17Z"),
"lastHeartbeat" : ISODate("2013-04-17T03:50:49Z"),
"pingMs" : 0
}
],
"ok" : 1
}
3.5 在57,59,69中分别配置config,mongos
./mongod --configsvr --dbpath /home/mongodb/workspace/data/config --port 20000 --logpath /home/mongodb/workspace/data/log/config.log --logappend --fork
在59中配置mongos
./mongos --configdb 202.173.88.57:20000,202.173.88.59:20000,202.173.88.69:20000 --port 40000 --chunkSize 5 --logpath /home/mongodb/workspace/data/log/mongos.log --logappend --fork
3.6 配置sharding
./mongo 202.173.88.59:40000/admin
db.runCommand({addshard:"shard1/202.173.88.57:27017,202.173.88.59:27017,202.173.88.69:27017",name:"shard1"})
db.runCommand({addshard:"shard2/202.173.88.57:27018,202.173.88.59:27018,202.173.88.69:27018",name:"shard2"})
db.runCommand({addshard:"shard3/202.173.88.57:27019,202.173.88.59:27019,202.173.88.69:27019",name:"shard3"})
sharding结果查看:
mongos> printShardingSizes()
Wed Apr 17 11:58:52 starting new replica set monitor for replica set shard1 with seed of 202.173.88.57:27017,202.173.88.59:27017,202.173.88.69:27017
Wed Apr 17 11:58:52 successfully connected to seed 202.173.88.57:27017 for replica set shard1
Wed Apr 17 11:58:52 changing hosts to { 0: "202.173.88.57:27017", 1: "202.173.88.69:27017", 2: "202.173.88.59:27017" } from shard1/
Wed Apr 17 11:58:52 trying to add new host 202.173.88.57:27017 to replica set shard1
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.57:27017 in replica set shard1
Wed Apr 17 11:58:52 trying to add new host 202.173.88.59:27017 to replica set shard1
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.59:27017 in replica set shard1
Wed Apr 17 11:58:52 trying to add new host 202.173.88.69:27017 to replica set shard1
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.69:27017 in replica set shard1
Wed Apr 17 11:58:52 Primary for replica set shard1 changed to 202.173.88.57:27017
Wed Apr 17 11:58:52 replica set monitor for replica set shard1 started, address is shard1/202.173.88.57:27017,202.173.88.59:27017,202.173.88.69:27017
Wed Apr 17 11:58:52 [ReplicaSetMonitorWatcher] starting
Wed Apr 17 11:58:52 starting new replica set monitor for replica set shard2 with seed of 202.173.88.57:27018,202.173.88.59:27018,202.173.88.69:27018
Wed Apr 17 11:58:52 successfully connected to seed 202.173.88.57:27018 for replica set shard2
Wed Apr 17 11:58:52 changing hosts to { 0: "202.173.88.57:27018", 1: "202.173.88.69:27018", 2: "202.173.88.59:27018" } from shard2/
Wed Apr 17 11:58:52 trying to add new host 202.173.88.57:27018 to replica set shard2
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.57:27018 in replica set shard2
Wed Apr 17 11:58:52 trying to add new host 202.173.88.59:27018 to replica set shard2
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.59:27018 in replica set shard2
Wed Apr 17 11:58:52 trying to add new host 202.173.88.69:27018 to replica set shard2
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.69:27018 in replica set shard2
Wed Apr 17 11:58:52 Primary for replica set shard2 changed to 202.173.88.57:27018
Wed Apr 17 11:58:52 replica set monitor for replica set shard2 started, address is shard2/202.173.88.57:27018,202.173.88.59:27018,202.173.88.69:27018
Wed Apr 17 11:58:52 starting new replica set monitor for replica set shard3 with seed of 202.173.88.57:27019,202.173.88.59:27019,202.173.88.69:27019
Wed Apr 17 11:58:52 successfully connected to seed 202.173.88.57:27019 for replica set shard3
Wed Apr 17 11:58:52 changing hosts to { 0: "202.173.88.57:27019", 1: "202.173.88.69:27019", 2: "202.173.88.59:27019" } from shard3/
Wed Apr 17 11:58:52 trying to add new host 202.173.88.57:27019 to replica set shard3
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.57:27019 in replica set shard3
Wed Apr 17 11:58:52 trying to add new host 202.173.88.59:27019 to replica set shard3
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.59:27019 in replica set shard3
Wed Apr 17 11:58:52 trying to add new host 202.173.88.69:27019 to replica set shard3
Wed Apr 17 11:58:52 successfully connected to new host 202.173.88.69:27019 in replica set shard3
Wed Apr 17 11:58:52 Primary for replica set shard3 changed to 202.173.88.57:27019
Wed Apr 17 11:58:52 replica set monitor for replica set shard3 started, address is shard3/202.173.88.57:27019,202.173.88.59:27019,202.173.88.69:27019
--- Sharding Status ---
sharding version: { "_id" : 1, "version" : 3 }
shards:
{
"_id" : "shard1",
"host" : "shard1/202.173.88.57:27017,202.173.88.59:27017,202.173.88.69:27017"
}
{
"_id" : "shard2",
"host" : "shard2/202.173.88.57:27018,202.173.88.59:27018,202.173.88.69:27018"
}
{
"_id" : "shard3",
"host" : "shard3/202.173.88.57:27019,202.173.88.59:27019,202.173.88.69:27019"
}
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
3.7 对数据库(mongodb_test)启用分片
db.runCommand( { enablesharding : "mongodb_test" } );
3.8 设置mongodb_test数据库下的集合为分片集合
db.runCommand( { shardcollection : "mongodb_test.student_1", key : { stuId: 1 } } )
3.9 测试:
1. 登录mongos,insert 测试数据
for (var i = 1; i <= 300000; i++) db.student_1.insert({stuId:i, name:"East271536394"+i, addr:"Beijing",
country:"China"})
3.10 在replica set 中查看数据
shard2:PRIMARY> db.student_1.find().count()
40957
shard3:PRIMARY> db.student_1.find().count()
28419
shard1:PRIMARY> db.student_1.find().count()
230624
mongos> printShardingStatus()
--- Sharding Status ---
sharding version: { "_id" : 1, "version" : 3 }
shards:
{ "_id" : "shard1", "host" : "shard1/202.173.88.57:27017,202.173.88.59:27017,202.173.88.69:27017" }
{ "_id" : "shard2", "host" : "shard2/202.173.88.57:27018,202.173.88.59:27018,202.173.88.69:27018" }
{ "_id" : "shard3", "host" : "shard3/202.173.88.57:27019,202.173.88.59:27019,202.173.88.69:27019" }
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "mongodb_test", "partitioned" : true, "primary" : "shard1" }
mongodb_test.student_1 chunks:
shard2 4
shard3 4
shard1 4
{ "stuId" : { $minKey : 1 } } -->> { "stuId" : 1 } on : shard2 Timestamp(2000, 0)
{ "stuId" : 1 } -->> { "stuId" : 369 } on : shard3 Timestamp(3000, 2)
{ "stuId" : 369 } -->> { "stuId" : 1000 } on : shard3 Timestamp(3000, 3)
{ "stuId" : 1000 } -->> { "stuId" : 14463 } on : shard2 Timestamp(4000, 0)
{ "stuId" : 14463 } -->> { "stuId" : 28210 } on : shard2 Timestamp(5000, 0)
{ "stuId" : 28210 } -->> { "stuId" : 41957 } on : shard3 Timestamp(6000, 0)
{ "stuId" : 41957 } -->> { "stuId" : 55704 } on : shard2 Timestamp(7000, 0)
{ "stuId" : 55704 } -->> { "stuId" : 69377 } on : shard3 Timestamp(8000, 0)
{ "stuId" : 69377 } -->> { "stuId" : 83124 } on : shard1 Timestamp(8000, 1)
{ "stuId" : 83124 } -->> { "stuId" : 110515 } on : shard1 Timestamp(3000, 16)
{ "stuId" : 110515 } -->> { "stuId" : 137744 } on : shard1 Timestamp(3000, 18)
{ "stuId" : 137744 } -->> { "stuId" : { $maxKey : 1 } } on : shard1 Timestamp(3000, 19)
{ "_id" : "km_mongodb", "partitioned" : false, "primary" : "shard2" }
完毕!