57-4 Database sharding concepts and the implementation of MongoDB sharding


Environment:
node1: 192.168.1.121  CentOS release 6.7
node2: 192.168.1.122  CentOS release 6.7
node3: 192.168.1.123  CentOS release 6.7

[root@node1 ~]# vim /etc/hosts
Add:
192.168.1.121 node1
192.168.1.122 node2
192.168.1.123 node3
[root@node1 ~]# scp /etc/hosts node2:/etc
[root@node1 ~]# scp /etc/hosts node3:/etc

[root@node1 ~]# service mongod stop
[root@node1 ~]# vim /etc/mongod.conf
Change:
#replSet=setname
to:
replSet=testSet
replIndexPrefetch=_id_only
[root@node1 ~]# service mongod start
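For reference, a minimal sketch of the replica-set-relevant lines in the 2.6 ini-style /etc/mongod.conf after this edit (the path values are the package defaults, assumed rather than dumped from the actual file):

logpath=/var/log/mongodb/mongod.log   # package default (assumption)
dbpath=/var/lib/mongo                 # package default (assumption)
fork=true
replSet=testSet                       # must be the same set name on every member
replIndexPrefetch=_id_only            # secondaries prefetch only the _id index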

[root@node1 ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
> show dbs
admin   (empty)
local   0.078GB
testdb  0.078GB
> use local
switched to db local
> show collections
startup_log
system.indexes
> exit
bye

[root@node1 mongodb-2.6.4]# scp mongodb-org-server-2.6.4-1.x86_64.rpm mongodb-org-tools-2.6.4-1.x86_64.rpm mongodb-org-shell-2.6.4-1.x86_64.rpm node2:/root
[root@node1 mongodb-2.6.4]# scp mongodb-org-server-2.6.4-1.x86_64.rpm mongodb-org-tools-2.6.4-1.x86_64.rpm mongodb-org-shell-2.6.4-1.x86_64.rpm node3:/root
[root@node2 ~]# yum -y install *.rpm
[root@node2 ~]# mkdir -p /mongodb/data
[root@node2 ~]# chown -R mongod.mongod /mongodb/
[root@node3 ~]# yum -y install *.rpm
[root@node3 ~]# mkdir -p /mongodb/data
[root@node3 ~]# chown -R mongod.mongod /mongodb/
[root@node1 ~]# scp /etc/mongod.conf node2:/etc/
[root@node1 ~]# scp /etc/mongod.conf node3:/etc/
[root@node2 ~]# service mongod start
[root@node3 ~]# service mongod start

[root@node1 ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
> rs.status()
{
    "startupStatus" : 3,
    "info" : "run rs.initiate(...) if not yet done for the set",
    "ok" : 0,
    "errmsg" : "can't get local.system.replset config from self or any seed (EMPTYCONFIG)"
}
> rs.initiate()
{
    "info2" : "no configuration explicitly specified -- making one",
    "me" : "node1:27017",
    "info" : "Config now saved locally.  Should come online in about a minute.",
    "ok" : 1
}
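Instead of letting rs.initiate() auto-generate a one-member configuration, it can be given an explicit configuration document up front. A sketch of that variant (the member list mirrors this setup; it is not what was actually typed above):

> rs.initiate({
    _id: "testSet",
    members: [
        { _id: 0, host: "node1:27017" },
        { _id: 1, host: "192.168.1.122:27017" },
        { _id: 2, host: "192.168.1.123:27017" }
    ]
})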

> rs.status()
{
    "set" : "testSet",
    "date" : ISODate("2017-01-08T14:33:14Z"),
    "myState" : 1,
    "members" : [
        {
            "_id" : 0,
            "name" : "node1:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 1316,
            "optime" : Timestamp(1483885955, 1),
            "optimeDate" : ISODate("2017-01-08T14:32:35Z"),
            "electionTime" : Timestamp(1483885956, 1),
            "electionDate" : ISODate("2017-01-08T14:32:36Z"),
            "self" : true
        }
    ],
    "ok" : 1
}

# Add a node
testSet:PRIMARY> rs.add("192.168.1.122")
{ "ok" : 1 }

testSet:PRIMARY> rs.status()
{
    "set" : "testSet",
    "date" : ISODate("2017-01-08T14:38:50Z"),
    "myState" : 1,
    "members" : [
        {
            "_id" : 0,
            "name" : "node1:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 1652,
            "optime" : Timestamp(1483886304, 1),
            "optimeDate" : ISODate("2017-01-08T14:38:24Z"),
            "electionTime" : Timestamp(1483885956, 1),
            "electionDate" : ISODate("2017-01-08T14:32:36Z"),
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "192.168.1.122:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 26,
            "optime" : Timestamp(1483886304, 1),
            "optimeDate" : ISODate("2017-01-08T14:38:24Z"),
            "lastHeartbeat" : ISODate("2017-01-08T14:38:48Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-08T14:38:48Z"),
            "pingMs" : 1,
            "syncingTo" : "node1:27017"
        }
    ],
    "ok" : 1
}

[root@node2 ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
        http://docs.mongodb.org/
Questions? Try the support group
        http://groups.google.com/group/mongodb-user
testSet:SECONDARY> show dbs
admin   (empty)
local   1.078GB
testdb  0.078GB
testSet:SECONDARY> use testdb
switched to db testdb
testSet:SECONDARY> rs.slaveOk()
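rs.slaveOk() tells this shell session that reading from a secondary is acceptable; without it, queries on a secondary are refused. The same intent can be expressed connection-wide or per-cursor; a sketch (not part of the original session):

testSet:SECONDARY> db.getMongo().setSlaveOk()                 // connection-wide equivalent
testSet:SECONDARY> db.students.find().readPref("secondary")   // per-cursor read preference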

testSet:SECONDARY> rs.status()
{
    "set" : "testSet",
    "date" : ISODate("2017-01-09T12:02:14Z"),
    "myState" : 2,
    "syncingTo" : "node1:27017",
    "members" : [
        {
            "_id" : 0,
            "name" : "node1:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 77028,
            "optime" : Timestamp(1483886304, 1),
            "optimeDate" : ISODate("2017-01-08T14:38:24Z"),
            "lastHeartbeat" : ISODate("2017-01-09T12:02:13Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T12:02:13Z"),
            "pingMs" : 1,
            "electionTime" : Timestamp(1483885956, 1),
            "electionDate" : ISODate("2017-01-08T14:32:36Z")
        },
        {
            "_id" : 1,
            "name" : "192.168.1.122:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 77851,
            "optime" : Timestamp(1483886304, 1),
            "optimeDate" : ISODate("2017-01-08T14:38:24Z"),
            "self" : true
        }
    ],
    "ok" : 1
}

testSet:SECONDARY> rs.isMaster()
{
    "setName" : "testSet",
    "setVersion" : 2,
    "ismaster" : false,
    "secondary" : true,
    "hosts" : [
        "192.168.1.122:27017",
        "node1:27017"
    ],
    "primary" : "node1:27017",
    "me" : "192.168.1.122:27017",
    "maxBsonObjectSize" : 16777216,
    "maxMessageSizeBytes" : 48000000,
    "maxWriteBatchSize" : 1000,
    "localTime" : ISODate("2017-01-09T12:03:59.702Z"),
    "maxWireVersion" : 2,
    "minWireVersion" : 0,
    "ok" : 1
}
testSet:PRIMARY> rs.isMaster()
{
    "setName" : "testSet",
    "setVersion" : 2,
    "ismaster" : true,
    "secondary" : false,
    "hosts" : [
        "node1:27017",
        "192.168.1.122:27017"
    ],
    "primary" : "node1:27017",
    "me" : "node1:27017",
    "maxBsonObjectSize" : 16777216,
    "maxMessageSizeBytes" : 48000000,
    "maxWriteBatchSize" : 1000,
    "localTime" : ISODate("2017-01-09T12:05:47.182Z"),
    "maxWireVersion" : 2,
    "minWireVersion" : 0,
    "ok" : 1
}

# Add another node
testSet:PRIMARY> rs.add("192.168.1.123")
{ "ok" : 1 }

[root@node3 ~]# mongo
MongoDB shell version: 2.6.4
connecting to: test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
        http://docs.mongodb.org/
Questions? Try the support group
        http://groups.google.com/group/mongodb-user
testSet:SECONDARY> rs.slaveOk()

{

"set": "testSet"

Date: ISODate ("2017-01-09T12:10:20Z")

"myState": 2

"syncingTo": "node1:27017"

"members": [

{

"_ id": 0

"name": "node1:27017"

"health": 1

"state": 1

"stateStr": "PRIMARY"

"uptime": 78

Optime: Timestamp (1483963739, 1)

OptimeDate: ISODate ("2017-01-09T12:08:59Z")

LastHeartbeat: ISODate ("2017-01-09T12:10:18Z")

LastHeartbeatRecv: ISODate ("2017-01-09T12:10:19Z")

"pingMs": 1

ElectionTime: Timestamp (1483885956, 1)

ElectionDate: ISODate ("2017-01-08T14:32:36Z")

}

{

"_ id": 1

"name": "192.168.1.122 purl 27017"

"health": 1

"state": 2

"stateStr": "SECONDARY"

"uptime": 78

Optime: Timestamp (1483963739, 1)

OptimeDate: ISODate ("2017-01-09T12:08:59Z")

LastHeartbeat: ISODate ("2017-01-09T12:10:18Z")

LastHeartbeatRecv: ISODate ("2017-01-09T12:10:18Z")

"pingMs": 1

"syncingTo": "node1:27017"

}

{

"_ id": 2

"name": "192.168.1.123 purl 27017"

"health": 1

"state": 2

"stateStr": "SECONDARY"

"uptime": 78317

Optime: Timestamp (1483963739, 1)

OptimeDate: ISODate ("2017-01-09T12:08:59Z")

"self": true

}

]

"ok": 1

}

testSet:SECONDARY> use testdb
switched to db testdb
testSet:SECONDARY> db.students.findOne()
{ "_id" : ObjectId("5871e94113222f399a5240a3"), "name" : "tom", "age" : 23 }

testSet:SECONDARY> rs.conf()
{
    "_id" : "testSet",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "node1:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.1.122:27017"
        },
        {
            "_id" : 2,
            "host" : "192.168.1.123:27017"
        }
    ]
}

testSet:PRIMARY> use testdb
switched to db testdb
testSet:PRIMARY> db.classes.insert({class: "One", nostu: 40})
WriteResult({ "nInserted" : 1 })
testSet:PRIMARY> show collections
classes
students
system.indexes

testSet:SECONDARY> db.classes.findOne()
{
    "_id" : ObjectId("58737e8606a316aec46edfdc"),
    "class" : "One",
    "nostu" : 40
}
testSet:SECONDARY> db.classes.insert({class: "Two", nostu: 50})
WriteResult({ "writeError" : { "code" : undefined, "errmsg" : "not master" } })

testSet:SECONDARY> rs.conf()
{
    "_id" : "testSet",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "node1:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.1.122:27017"
        },
        {
            "_id" : 2,
            "host" : "192.168.1.123:27017"
        }
    ]
}

# Make the primary "step down"
testSet:PRIMARY> rs.stepDown()
2017-01-09T20:23:48.978+0800 DBClientCursor::init call() failed
2017-01-09T20:23:48.980+0800 Error: error doing query: failed at src/mongo/shell/query.js:81
2017-01-09T20:23:48.982+0800 trying reconnect to 127.0.0.1:27017 (127.0.0.1) failed
2017-01-09T20:23:48.984+0800 reconnect 127.0.0.1:27017 (127.0.0.1) ok
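The connection reset is normal: stepping down closes client connections while a new election runs, and the shell simply reconnects. rs.stepDown() also accepts the number of seconds the old primary must wait before standing for election again; a sketch (not run above):

testSet:PRIMARY> rs.stepDown(60)    // remain secondary for at least 60 seconds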

testSet:SECONDARY> rs.status()
{
    "set" : "testSet",
    "date" : ISODate("2017-01-09T12:24:27Z"),
    "myState" : 2,
    "syncingTo" : "192.168.1.123:27017",
    "members" : [
        {
            "_id" : 0,
            "name" : "node1:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 79989,
            "optime" : Timestamp(1483964038, 1),
            "optimeDate" : ISODate("2017-01-09T12:13:58Z"),
            "infoMessage" : "syncing to: 192.168.1.123:27017",
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "192.168.1.122:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 78363,
            "optime" : Timestamp(1483964038, 1),
            "optimeDate" : ISODate("2017-01-09T12:13:58Z"),
            "lastHeartbeat" : ISODate("2017-01-09T12:24:25Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T12:24:25Z"),
            "pingMs" : 1,
            "lastHeartbeatMessage" : "syncing to: node1:27017",
            "syncingTo" : "node1:27017"
        },
        {
            "_id" : 2,
            "name" : "192.168.1.123:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 928,
            "optime" : Timestamp(1483964038, 1),
            "optimeDate" : ISODate("2017-01-09T12:13:58Z"),
            "lastHeartbeat" : ISODate("2017-01-09T12:24:26Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T12:24:25Z"),
            "pingMs" : 1,
            "electionTime" : Timestamp(1483964629, 1),
            "electionDate" : ISODate("2017-01-09T12:23:49Z")
        }
    ],
    "ok" : 1
}

testSet:PRIMARY> db.printReplicationInfo()
configured oplog size:   990MB
log length start to end: 299secs (0.08hrs)
oplog first event time:  Mon Jan 09 2017 20:08:59 GMT+0800 (CST)
oplog last event time:   Mon Jan 09 2017 20:13:58 GMT+0800 (CST)
now:                     Mon Jan 09 2017 20:27:20 GMT+0800 (CST)
testSet:SECONDARY> db.printReplicationInfo()
configured oplog size:   990MB
log length start to end: 77734secs (21.59hrs)
oplog first event time:  Sun Jan 08 2017 22:38:24 GMT+0800 (CST)
oplog last event time:   Mon Jan 09 2017 20:13:58 GMT+0800 (CST)
now:                     Mon Jan 09 2017 20:28:01 GMT+0800 (CST)
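The oplog is a capped collection, so "log length start to end" is the replication window: how long a secondary can stay offline and still catch up without a full resync. The size can only be chosen before a member's first start; a hedged config sketch (the 2048 value is illustrative, not used here):

# in /etc/mongod.conf, set before the member's first start (assumption, not done above)
oplogSize=2048    # megabytes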

testSet:SECONDARY> rs.status()
{
    "set" : "testSet",
    "date" : ISODate("2017-01-09T12:29:38Z"),
    "myState" : 2,
    "syncingTo" : "node1:27017",
    "members" : [
        {
            "_id" : 0,
            "name" : "node1:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 78672,
            "optime" : Timestamp(1483964038, 1),
            "optimeDate" : ISODate("2017-01-09T12:13:58Z"),
            "lastHeartbeat" : ISODate("2017-01-09T12:29:37Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T12:29:37Z"),
            "pingMs" : 1,
            "syncingTo" : "192.168.1.123:27017"
        },
        {
            "_id" : 1,
            "name" : "192.168.1.122:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 79495,
            "optime" : Timestamp(1483964038, 1),
            "optimeDate" : ISODate("2017-01-09T12:13:58Z"),
            "self" : true
        },
        {
            "_id" : 2,
            "name" : "192.168.1.123:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 1238,
            "optime" : Timestamp(1483964038, 1),
            "optimeDate" : ISODate("2017-01-09T12:13:58Z"),
            "lastHeartbeat" : ISODate("2017-01-09T12:29:37Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T12:29:37Z"),
            "pingMs" : 1,
            "electionTime" : Timestamp(1483964629, 1),
            "electionDate" : ISODate("2017-01-09T12:23:49Z")
        }
    ],
    "ok" : 1
}

# Take the node1 member offline
testSet:SECONDARY> exit
bye
[root@node1 ~]# service mongod stop
Stopping mongod:                                           [  OK  ]

# Save the current configuration into cfg (must be done on the primary)
testSet:PRIMARY> cfg = rs.conf()
{
    "_id" : "testSet",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "node1:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.1.122:27017"
        },
        {
            "_id" : 2,
            "host" : "192.168.1.123:27017"
        }
    ]
}

# Set a member's priority
testSet:PRIMARY> cfg.members[1].priority = 2
2

# Re-apply the edited configuration (raising node2's priority triggers a new
# election, so the shell connection drops and then reconnects)
testSet:PRIMARY> rs.reconfig(cfg)
2017-01-09T21:08:58.403+0800 DBClientCursor::init call() failed
2017-01-09T21:08:58.404+0800 Error: error doing query: failed at src/mongo/shell/query.js:81
2017-01-09T21:08:58.406+0800 trying reconnect to 127.0.0.1:27017 (127.0.0.1) failed
2017-01-09T21:08:58.407+0800 reconnect 127.0.0.1:27017 (127.0.0.1) ok
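The same priority change can be written as one compound statement; a sketch equivalent to the two steps above:

testSet:PRIMARY> cfg = rs.conf(); cfg.members[1].priority = 2; rs.reconfig(cfg)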

testSet:SECONDARY>
testSet:SECONDARY> rs.status()
{
    "set" : "testSet",
    "date" : ISODate("2017-01-09T13:09:46Z"),
    "myState" : 2,
    "syncingTo" : "192.168.1.122:27017",
    "members" : [
        {
            "_id" : 0,
            "name" : "node1:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 98,
            "optime" : Timestamp(1483967288, 1),
            "optimeDate" : ISODate("2017-01-09T13:08:08Z"),
            "lastHeartbeat" : ISODate("2017-01-09T13:09:45Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T13:09:45Z"),
            "pingMs" : 1,
            "lastHeartbeatMessage" : "syncing to: 192.168.1.122:27017",
            "syncingTo" : "192.168.1.122:27017"
        },
        {
            "_id" : 1,
            "name" : "192.168.1.122:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 98,
            "optime" : Timestamp(1483967288, 1),
            "optimeDate" : ISODate("2017-01-09T13:08:08Z"),
            "lastHeartbeat" : ISODate("2017-01-09T13:09:45Z"),
            "lastHeartbeatRecv" : ISODate("2017-01-09T13:09:46Z"),
            "pingMs" : 1,
            "electionTime" : Timestamp(1483967290, 1),
            "electionDate" : ISODate("2017-01-09T13:08:10Z")
        },
        {
            "_id" : 2,
            "name" : "192.168.1.123:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 81883,
            "optime" : Timestamp(1483967288, 1),
            "optimeDate" : ISODate("2017-01-09T13:08:08Z"),
            "infoMessage" : "syncing to: 192.168.1.122:27017",
            "self" : true
        }
    ],
    "ok" : 1
}

testSet:SECONDARY> rs.conf()
{
    "_id" : "testSet",
    "version" : 4,
    "members" : [
        {
            "_id" : 0,
            "host" : "node1:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.1.122:27017",
            "priority" : 2
        },
        {
            "_id" : 2,
            "host" : "192.168.1.123:27017"
        }
    ]
}

testSet:PRIMARY> cfg = rs.conf()
{
    "_id" : "testSet",
    "version" : 4,
    "members" : [
        {
            "_id" : 0,
            "host" : "node1:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.1.122:27017",
            "priority" : 2
        },
        {
            "_id" : 2,
            "host" : "192.168.1.123:27017"
        }
    ]
}

testSet:PRIMARY> cfg.members[2].arbiterOnly = true
true
testSet:PRIMARY> rs.reconfig(cfg)
{
    "errmsg" : "exception: arbiterOnly may not change for members",
    "code" : 13510,
    "ok" : 0
}
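As the error says, an existing data-bearing member cannot be flipped to an arbiter through rs.reconfig(); it has to be removed and re-added as an arbiter. A sketch of that workaround (not performed in this walkthrough; the member's data directory should also be wiped before it rejoins):

testSet:PRIMARY> rs.remove("192.168.1.123:27017")
testSet:PRIMARY> rs.addArb("192.168.1.123:27017")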

testSet:PRIMARY> rs.conf()
{
    "_id" : "testSet",
    "version" : 4,
    "members" : [
        {
            "_id" : 0,
            "host" : "node1:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.1.122:27017",
            "priority" : 2
        },
        {
            "_id" : 2,
            "host" : "192.168.1.123:27017"
        }
    ]
}

testSet:PRIMARY> rs.printSlaveReplicationInfo()
source: node1:27017
    syncedTo: Mon Jan 09 2017 21:08:08 GMT+0800 (CST)
        0 secs (0 hrs) behind the primary
source: 192.168.1.123:27017
    syncedTo: Mon Jan 09 2017 21:08:08 GMT+0800 (CST)
        0 secs (0 hrs) behind the primary

[root@node1 ~]# service mongod stop
[root@node2 ~]# service mongod stop
[root@node1 ~]# rm -rf /mongodb/
[root@node2 ~]# rm -rf /mongodb/data/
[root@node3 ~]# rm -rf /mongodb/data/
[root@node3 ~]# scp *.rpm node4:/root
[root@node4 ~]# mkdir -p /mongodb/data
[root@node4 ~]# yum -y install *.rpm
[root@node4 ~]# chown -R mongod.mongod /mongodb/

[root@node2 ~]# vim /etc/mongod.conf
Change:
replSet=testSet
replIndexPrefetch=_id_only
to:
#replSet=testSet
#replIndexPrefetch=_id_only
Add:
dbpath=/mongodb/data
configsvr=true
[root@node2 ~]# install -o mongod -g mongod -d /mongodb/data
[root@node2 ~]# ls -ld /mongodb/data/
drwxr-xr-x 2 mongod mongod 4096 Jan  9 22:13 /mongodb/data/
[root@node2 ~]# service mongod start

[root@node1 ~]# cd mongodb-2.6.4/
[root@node1 mongodb-2.6.4]# yum -y install mongodb-org-mongos-2.6.4-1.x86_64.rpm
[root@node2 ~]# service mongod stop
[root@node2 ~]# rm -rf /mongodb/data/*
[root@node2 ~]# service mongod start
# The first attempt fails because --fork requires a --logpath
[root@node1 mongodb-2.6.4]# mongos --configdb=192.168.1.122 --fork
[root@node1 mongodb-2.6.4]# mongos --configdb=192.168.1.122 --fork --logpath=/var/log/mongodb/mongod.log
2017-01-09T22:28:03.812+0800 warning: running with 1 config server should be done only for testing purposes and is not recommended for production
about to fork child process, waiting until server is ready for connections.
forked process: 18397
child process started successfully, parent exiting
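As the warning notes, a single config server is only acceptable for testing; production clusters run three, passed to mongos as a comma-separated list. A hedged sketch (hostnames are illustrative; 27019 is the default port a mongod adopts with configsvr=true):

mongos --configdb=cfg1:27019,cfg2:27019,cfg3:27019 --fork --logpath=/var/log/mongodb/mongos.log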

[root@node1 mongodb-2.6.4]# mongo --host 192.168.1.121
MongoDB shell version: 2.6.4
connecting to: 192.168.1.121:27017/test

[root@node3 ~]# install -o mongod -g mongod -d /mongodb/data
[root@node3 ~]# vim /etc/mongod.conf
Change:
replSet=testSet
replIndexPrefetch=_id_only
to:
#replSet=testSet
#replIndexPrefetch=_id_only
[root@node3 ~]# service mongod start

[root@node4 ~]# vim /etc/mongod.conf
Change:
dbpath=/var/lib/mongo
to:
dbpath=/mongodb/data
Change:
bind_ip=127.0.0.1
to:
#bind_ip=127.0.0.1
[root@node4 ~]# service mongod start

# 192.168.1.122 is the config server (configsvr=true moves mongod to port 27019
# by default), so nothing answers on 27017 and it cannot be added as a shard
mongos> sh.addShard("192.168.1.122")
{
    "ok" : 0,
    "errmsg" : "couldn't connect to new shard socket exception [CONNECT_ERROR] for 192.168.1.122:27017"
}

mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "version" : 4,
    "minCompatibleVersion" : 4,
    "currentVersion" : 5,
    "clusterId" : ObjectId("58739d7487c21f53b917098b")
}
  shards:
  databases:
    { "_id" : "admin", "partitioned" : false, "primary" : "config" }

mongos> sh.addShard("192.168.1.123")
{
    "ok" : 0,
    "errmsg" : "host is part of set testSet, use replica set url format <setname>/<server1>,<server2>,..."
}
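Following the error message, a host that belongs to a replica set has to be added with the replica-set URL form. A sketch of the fix and of the usual next steps to actually shard a collection (none of this appears in the transcript above; testdb.students and the _id shard key are used for illustration):

mongos> sh.addShard("testSet/192.168.1.123:27017")
mongos> sh.enableSharding("testdb")
mongos> sh.shardCollection("testdb.students", { _id: 1 })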
