Network Security Internet Technology Development Database Servers Mobile Phone Android Software Apple Software Computer Software News IT Information

In addition to Weibo, there is also WeChat

Please pay attention

WeChat public account

Shulou

Mongodb3.2 sharding deploy

2025-01-16 Update From: SLTechnology News&Howtos shulou NAV: SLTechnology News&Howtos > Database >

Share

Shulou(Shulou.com)06/01 Report--

I. deployment of software

Install mongodb for pc1, pc2 and pc3, respectively, as follows:

[root@pc1 ~]# tail /etc/security/limits.conf

mongod soft nproc 4000
* hard nofile 1000000
* soft nofile 1000000
* soft core unlimited
* soft stack 10240
* - nofile 65535
push - nproc 65535
push - nofile 320000
work - nproc 10000
(Note: the line breaks above were lost in the original page and have been reconstructed; verify against your own limits.conf.)

[root@pc1 ~]# tail /etc/security/limits.d/90-nproc.conf

* soft nproc 1024
root soft nproc unlimited

[root@pc1 ~]# cat /etc/yum.repos.d/mongodb.repo

[mongodb-org-3.2]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/amazon/2013.03/mongodb-org/3.2/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.2.asc

[root@pc1 ~]# yum -y install mongodb-org

[root@pc1 ~]# chkconfig mongod off

[root@pc1 ~]# mkdir -p /data/mongodb/{config,data/{config,logs,shard{1,2,3}_1}}

[root@pc1 ~]# chown -R mongod.mongod /data/mongodb

[root@pc1 ~]# cp /etc/mongod.conf /data/mongodb/config/shard1.conf

[root@pc1 ~]# tree /data/

/ data/

└── mongodb

├── config

│ └── shard1.conf

└── data

├── config

├── logs

├── shard1_1

├── shard2_1

└── shard3_1

II. Configuration of services

The configuration file and startup script for replica set 1 are as follows:

[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/shard1.conf

systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/shard1.log
storage:
  dbPath: /data/mongodb/data/shard1_1
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/shard1.pid  # location of pidfile
net:
  port: 27027
  # bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: shard1
sharding:
  clusterRole: shardsvr

[root@pc1 ~] # cp / etc/init.d/mongod / etc/init.d/shard1

[root@pc1 ~] # vim / etc/init.d/shard1

[root@pc1 ~] # egrep "^ [^ # $]" / etc/init.d/shard1

. /etc/rc.d/init.d/functions

CONFIGFILE="/data/mongodb/config/shard1.conf"
OPTIONS="-f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongod}

MONGO_USER=mongod
MONGO_GROUP=mongod

SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi

NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi

PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`

start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi

  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi

  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000

  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/shard1
}

stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/shard1
}

restart () {
        stop
        start
}

mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`

  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration ))
  done

  kill -KILL $pid >/dev/null 2>&1
  usleep 100000

  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}

RETVAL=0

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/shard1 ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac

exit $RETVAL

The configuration file and startup script for replica set 2 are as follows:

[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/shard2.conf

systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/shard2.log
storage:
  dbPath: /data/mongodb/data/shard2_1
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/shard2.pid  # location of pidfile
net:
  port: 27028
  # bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: shard2
sharding:
  clusterRole: shardsvr

[root@pc1 ~] # cp / etc/init.d/mongod / etc/init.d/shard2

[root@pc1 ~] # vim / etc/init.d/shard2

[root@pc1 ~] # egrep "^ [^ # $]" / etc/init.d/shard2

. /etc/rc.d/init.d/functions

CONFIGFILE="/data/mongodb/config/shard2.conf"
OPTIONS="-f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongod}

MONGO_USER=mongod
MONGO_GROUP=mongod

SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi

NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi

PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`

start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi

  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi

  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000

  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/shard2
}

stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/shard2
}

restart () {
        stop
        start
}

mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`

  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration ))
  done

  kill -KILL $pid >/dev/null 2>&1
  usleep 100000

  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}

RETVAL=0

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/shard2 ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac

exit $RETVAL

The configuration file and startup script for replica set 3 are as follows:

[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/shard3.conf

systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/shard3.log
storage:
  dbPath: /data/mongodb/data/shard3_1
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/shard3.pid  # location of pidfile
net:
  port: 27026
  # bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: shard3
sharding:
  clusterRole: shardsvr

[root@pc1 ~] # cp / etc/init.d/mongod / etc/init.d/shard3

[root@pc1 ~] # vim / etc/init.d/shard3

[root@pc1 ~] # egrep "^ [^ # $]" / etc/init.d/shard3 (the content is basically the same as shard2 and shard1)

Copy the configuration file to pc2, pc3, and start the service

[root@pc1 ~]# scp /data/mongodb/config/*.conf pc2:/data/mongodb/config/

[root@pc1 ~]# scp /etc/init.d/shard* pc2:/etc/init.d/

[root@pc1 ~]# scp /data/mongodb/config/*.conf pc3:/data/mongodb/config/

[root@pc1 ~]# scp /etc/init.d/shard* pc3:/etc/init.d/

[root@pc1 ~] #

[root@pc1 ~]# /etc/init.d/shard1 start

[root@pc1 ~]# ssh pc2 '/etc/init.d/shard1 start'

[root@pc1 ~]# ssh pc3 '/etc/init.d/shard1 start'

[root@pc1 ~]# /etc/init.d/shard2 start

[root@pc1 ~]# ssh pc2 '/etc/init.d/shard2 start'

[root@pc1 ~]# ssh pc3 '/etc/init.d/shard2 start'

[root@pc1 ~]# /etc/init.d/shard3 start

[root@pc1 ~]# ssh pc2 '/etc/init.d/shard3 start'

[root@pc1 ~]# ssh pc3 '/etc/init.d/shard3 start'

Configure replica set 1 as follows:

[root@pc1 ~] # mongo localhost:27027

MongoDB shell version: 3.2.7connecting to: localhost:27027/test > use adminswitched to db admin > rs.initiate () {"info2": "no configuration specified. Using a default configuration for the set "," me ":" pc1:27027 "," ok ": 1} shard1:OTHER > rs.add (" pc2:27027 ") {" ok ": 1} shard1:PRIMARY > rs.addArb (" pc3:27027 ") {" ok ": 1} shard1:PRIMARY > rs.status () {" set ":" shard1 "," date ": ISODate (" 2016-06-28T03:59:41.403Z ")," myState ": 1," term ": NumberLong (1) HeartbeatIntervalMillis: NumberLong (2000), members: [{"_ id": 0, "name": "pc1:27027", "health": 1, "state": 1, "stateStr": "PRIMARY", "uptime": 1634, "optime": {"ts": Timestamp (1467086346, 1), "t": NumberLong (1)}, "optimeDate": ISODate ("2016-06-28T03:59:06Z") InfoMessage: "could not find member to sync from", "electionTime": Timestamp (1467086301, 2), "electionDate": ISODate ("2016-06-28T03:58:21Z"), "configVersion": 3, "self": true}, {"_ id": 1, "name": "pc2:27027", "health": 1, "state": 2, "stateStr": "SECONDARY", "uptime": 54, "optime": {"ts": Timestamp (1467086346) ), "t": NumberLong (1)}, "optimeDate": ISODate ("2016-06-28T03:59:06Z"), "lastHeartbeat": ISODate ("2016-06-28T03:59:40.050Z"), "lastHeartbeatRecv": ISODate ("2016-06-28T03:59:41.041Z"), "pingMs": NumberLong (1), "syncingTo": "pc1:27027", "configVersion": 3}, {"_ id": 2, "name": "pc3:27027" "health": 1, "state": 7, "ARBITER", "uptime": 35, "lastHeartbeat": ISODate ("2016-06-28T03:59:40.053Z"), "lastHeartbeatRecv": ISODate ("2016-06-28T03:59:41.044Z"), "pingMs": NumberLong (2), "configVersion": 3}], "ok": 1}

The operation of configuration replica set 2 and configuration replica set 3 is basically the same as that of configuration replica set. Note: when configuring replica set 1, pc1 is primary, pc2 is secondary, and pc3 is arbiter; configuration replica set 2, pc2 is primary, pc3 is secondary, and pc1 is arbiter; configuration replica set 3, pc3 is primary, pc1 is secondary, and pc2 is arbiter.

The configuration file and startup file of config server are as follows:

[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/config.conf

systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/config.log
storage:
  dbPath: /data/mongodb/data/config
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/config.pid  # location of pidfile
net:
  port: 27029
  # bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: configrs
sharding:
  clusterRole: configsvr

[root@pc1 ~] # cp / etc/init.d/mongod / etc/init.d/mconfig

[root@pc1 ~] # vim / etc/init.d/mconfig

[root@pc1 ~] # egrep "^ [^ # $]" / etc/init.d/mconfig

. /etc/rc.d/init.d/functions

CONFIGFILE="/data/mongodb/config/config.conf"
OPTIONS="-f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongod}

MONGO_USER=mongod
MONGO_GROUP=mongod

SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi

NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi

PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`

start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi

  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi

  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000

  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/mconfig
}

stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mconfig
}

restart () {
        stop
        start
}

mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`

  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration ))
  done

  kill -KILL $pid >/dev/null 2>&1
  usleep 100000

  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}

RETVAL=0

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/mconfig ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac

exit $RETVAL

[root@pc1 ~]# scp /data/mongodb/config/config.conf pc2:/data/mongodb/config

[root@pc1 ~]# scp /etc/init.d/mconfig pc2:/etc/init.d/

[root@pc1 ~] #

[root@pc1 ~]# scp /data/mongodb/config/config.conf pc3:/data/mongodb/config

[root@pc1 ~]# scp /etc/init.d/mconfig pc3:/etc/init.d/

[root@pc1 ~] #

[root@pc1 ~] # / etc/init.d/mconfig start

Starting mongod: [OK]

[root@pc1 ~]# lsof -i:27029

COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME

Mongod 2765 mongod 6u IPv4 11520 0t0 TCP *: 27029 (LISTEN)

[root@pc1 ~]# ssh pc2 '/etc/init.d/mconfig start'

[root@pc1 ~]# ssh pc3 '/etc/init.d/mconfig start'

To maintain the consistency of config server and implement the replica set function of each config server, the result is as follows (arbiter is not supported for the replica set of config server):

[root@pc1 ~] # mongo localhost:27029

MongoDB shell version: 3.2.7connecting to: localhost:27029/testconfigrs:SECONDARY > use adminswitched to db adminconfigrs:SECONDARY > rs.slaveOk configrs:SECONDARY > rs.status () {"set": "configrs", "date": ISODate ("2016-07-01T03:25:49.471Z"), "myState": 2, "term": NumberLong (2), "syncingTo": "pc3:27029", "configsvr": true, "heartbeatIntervalMillis": NumberLong (2000) "members": [{"_ id": 0, "name": "pc1:27029", "health": 1, "state": 2, "stateStr": "SECONDARY", "uptime": 3937, "optime": {"ts": Timestamp (1467339616, 1), "t": NumberLong (2)}, "optimeDate": ISODate ("2016-07-01T02:20:16Z"), "syncingTo": "pc3:27029", "configVersion" 3: "self": true, {"_ id": 1, "name": "pc2:27029", "health": 1, "state": 2, "stateStr": "SECONDARY", "uptime": 3936, "optime": {"ts": Timestamp (1467339616, 1), "t": NumberLong (2)}, "optimeDate": ISODate ("2016-07-01T02:20:16Z"), "lastHeartbeat": ISODate ("2016-07-01T03:25:48.578Z") LastHeartbeatRecv: ISODate ("2016-07-01T03:25:47.481Z"), "pingMs": NumberLong (1), "syncingTo": "pc3:27029", "configVersion": 3}, {"_ id": 2, "name": "pc3:27029", "health": 1, "state": 1, "stateStr": "PRIMARY", "uptime": 3936, "optime": {"ts": Timestamp (1467339616, 1) "t": NumberLong (2)}, "optimeDate": ISODate ("2016-07-01T02:20:16Z"), "lastHeartbeat": ISODate ("2016-07-01T03:25:49.146Z"), "lastHeartbeatRecv": ISODate ("2016-07-01T03:25:47.585Z"), "pingMs": NumberLong (1), "electionTime": Timestamp (1467339615, 1), "electionDate": ISODate ("2016-07-01T02:20:15Z"), "configVersion": 3}] "ok": 1} configrs:SECONDARY > exitbye [root@pc1 ~] #

The mongos routing configuration file and startup script are as follows:

[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/mongos.conf

systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/mongos.log
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongos.pid  # location of pidfile
net:
  port: 27017
  # bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
sharding:
  configDB: pc1:27029,pc2:27029,pc3:27029  # Note: MongoDB 3.4 requires configDB in configrs_name/configServer_list format

[root@pc1 ~] # cp / etc/init.d/mongod / etc/init.d/mongos

[root@pc1 ~] # vim / etc/init.d/mongos

[root@pc1 ~] # egrep "^ [^ # $]" / etc/init.d/mongos

. /etc/rc.d/init.d/functions

CONFIGFILE="/data/mongodb/config/mongos.conf"
OPTIONS="-f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongos}

MONGO_USER=mongod
MONGO_GROUP=mongod

SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi

NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi

PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`

start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi

  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi

  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000

  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/mongos
}

stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mongos
}

restart () {
        stop
        start
}

mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`

  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration ))
  done

  kill -KILL $pid >/dev/null 2>&1
  usleep 100000

  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}

RETVAL=0

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/mongos ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac

exit $RETVAL

[root@pc1 ~]# scp /data/mongodb/config/mongos.conf pc2:/data/mongodb/config/

[root@pc1 ~]# scp /etc/init.d/mongos pc2:/etc/init.d/

[root@pc1 ~]# scp /data/mongodb/config/mongos.conf pc3:/data/mongodb/config/

[root@pc1 ~]# scp /etc/init.d/mongos pc3:/etc/init.d/

[root@pc1 ~]# /etc/init.d/mongos start

Starting mongod: [OK]

[root@pc1 ~]# ssh pc2 '/etc/init.d/mongos start'

Starting mongod: [OK]

[root@pc1 ~]# ssh pc3 '/etc/init.d/mongos start'

Starting mongod: [OK]

[root@pc1 ~] #

Some netizens have suggested replicating between multiple mongos routers (i.e. a replica set of mongos instances). From an architectural standpoint I cannot see what this scheme would accomplish — mongos is a stateless router, not a data-bearing node. I did try it, and found that the mongos service fails to start once a replica set is configured for it (I have not investigated further; comments are welcome).

Third, realize slicing

To connect to any mongos route, do the following:

[root@pc1 ~] # mongo

MongoDB shell version: 3.2.7

Connecting to: test

Mongos > use admin

Switched to db admin

Mongos > sh.status ()

-Sharding Status-sharding version: {"_ id": 1, "minCompatibleVersion": 5, "currentVersion": 6, "clusterId": ObjectId ("577226b7511b1f96da0ddba2")} shards: active mongoses: "3.2.7": 3 balancer:Currently enabled: yesCurrently running: noFailed balancer rounds in last 5 attempts: 0Migration Results for the last 24 hours:No recent migrations databases:

Mongos > sh.addShard ("shard1/pc1:27027")

{"shardAdded": "shard1", "ok": 1}

Mongos > sh.addShard ("shard3/pc2:27026")

{"shardAdded": "shard3", "ok": 1}

Mongos > sh.addShard ("shard2/pc3:27028")

{"shardAdded": "shard2", "ok": 1}

Mongos > sh.status ()

-Sharding Status-sharding version: {"_ id": 1, "minCompatibleVersion": 5, "currentVersion": 6, "clusterId": ObjectId ("577226b7511b1f96da0ddba2")} shards: {"_ id": "shard1", "host": "shard1/pc1:27027,pc2:27027"} {"_ id": "shard2", "host": "shard2/pc1:27028,pc3:27028"} {"_ id": "shard3" "host": "shard3/pc2:27026,pc3:27026"} active mongoses: "3.2.7": 3 balancer:Currently enabled: yesCurrently running: noFailed balancer rounds in last 5 attempts: 5Last reported error: mongos specified a different config database string: stored: pc1:27029 vs given: pc1:27029,pc2:27029,pc3:27029Time of Reported error: Tue Jun 28 2016 07:34:07 GMT+0000 (UTC) Migration Results for the last 24 hours:No recent migrations databases:

Mongos > use config

Switched to db config

Mongos > db.chunks.find ()

Mongos > db.settings.find ()

{"_ id": "chunksize", "value": NumberLong (64)}

Mongos > db.settings.save ({"_ id": "chunksize", "value": NumberLong (5)}) # it is not recommended to change the value of chunk. This example is changed to 5m for example effect. Smallest chunksize (1m), default chunksize is 64m

WriteResult ({"nMatched": 1, "nUpserted": 0, "nModified": 1})

Mongos > db.settings.find ()

{"_ id": "chunksize", "value": NumberLong (5)}

Mongos > use admin

Switched to db admin

Mongos > sh.enableSharding ("test")

{"ok": 1}

Mongos > sh.shardCollection ("test.user", {uid:1})

{"collectionsharded": "test.user", "ok": 1}

Mongos > use test

Switched to db test

mongos> db.user.ensureIndex({uid: 1})

{"raw": {"shard1/pc1:27027,pc2:27027": {"createdCollectionAutomatically": true, "numIndexesBefore": 1, "numIndexesAfter": 2, "ok": 1, "$gleStats": {"lastOpTime": Timestamp (1467102931, 2), "electionId": ObjectId ("7fffffff0000000000000001")}}, "ok": 1}

mongos> for (i = 1; i <= 10; i++) { db.user.insert({uid: "user" + i}) }   // insert sample documents so chunks split (the original command was garbled; reconstructed from the chunk ranges shown below)
mongos> sh.status()

-Sharding Status-sharding version: {"_ id": 1, "minCompatibleVersion": 5, "currentVersion": 6, "clusterId": ObjectId ("577234b7c92b86a15a2901d4")} shards: {"_ id": "shard1", "host": "shard1/pc1:27027,pc2:27027"} {"_ id": "shard2", "host": "shard2/pc2:27028,pc3:27028"} {"_ id": "shard3" "host": "shard3/pc1:27026,pc3:27026"} active mongoses: "3.2.7": 3 balancer:Currently enabled: yesCurrently running: noFailed balancer rounds in last 5 attempts: 0Migration Results for the last 24 hours:2: Success1: Failed with error 'aborted', from shard1 to shard2 databases: {"_ id": "test", "primary": "shard1" "partitioned": true} test.usershard key: {"uid": 1} unique: falsebalancing: truechunks:shard11shard21shard31 {"uid": {"$minKey": 1}}-- > {"uid": "user2"} on: shard2 Timestamp (2,0) {"uid": "user2"}-- > {"uid": "user8"} on: shard3 Timestamp (3) 0) {"uid": "user8"}-- > > {"uid": {"$maxKey": 1}} on: shard1 Timestamp (3,1)

Mongos > use config

Switched to db config

Mongos > show collections

Actionlogchangelogchunkscollectionsdatabaseslockpingslocksmongossettingsshardstagsversion

Mongos > db.settings.find ()

{"_ id": "chunksize", "value": NumberLong (64)}

Mongos > db.shards.find ()

{"_ id": "shard1", "host": "shard1/pc1:27027,pc2:27027"} {"_ id": "shard3", "host": "shard3/pc1:27026,pc3:27026"} {"_ id": "shard2", "host": "shard2/pc2:27028,pc3:27028"}

Mongos > db.databases.find ()

{"_ id": "test", "primary": "shard1", "partitioned": true}

Mongos > db.chunks.find ()

{"_ id": "test.user-uid_MinKey", "lastmod": Timestamp (2,0), "lastmodEpoch": ObjectId ("57723675c92b86a15a290209"), "ns": "test.user", "min": {"uid": {"$minKey": 1}}, "max": {"uid": "user2"} "shard": "shard2"} {"_ id": "test.user-uid_\" user2\ "," lastmod ": Timestamp (3,0)," lastmodEpoch ": ObjectId (" 57723675c92b86a15a290209 ")," ns ":" test.user "," min ": {" uid ":" user2 "}," max ": {" uid ":" user8 "} "shard": "shard3"} {"_ id": "test.user-uid_\" user8\ "," lastmod ": Timestamp (3,1)," lastmodEpoch ": ObjectId (" 57723675c92b86a15a290209 ")," ns ":" test.user "," min ": {" uid ":" user8 "}," max ": {" uid ": {" $maxKey ": 1}} "shard": "shard1"}

Mongos > exit

IV. Supplementary orders

Db.runCommand ({addshard: "shard1/pc1:27027,pc2:27027,pc3:27027", name: "shard1"})

Db.runCommand ({addshard: "shard2/pc1:27028,pc2:27028,pc3:27028", name: "shard2"})

Db.runCommand ({addshard: "shard3/pc1:27026,pc2:27026,pc3:27026", name: "shard3"})

Use admin

Db.runCommand ({enablesharding: "db_name"})

Db.runCommand ({shardcollection: "db_name.collection_name", key: {fields_name: "hashed"}})

Db.runCommand ({shardcollection: "db_name.collection_name", key: {fields_name:1}})

Sh.shardCollection ("db_name.collection_name", {fields_name1:1,fields_name2:1})

Welcome to subscribe to "Shulou Technology Information" to get the latest news, interesting stories and hot topics in the IT industry, and to keep up with the hottest Internet news, technology news and IT industry trends.

Views: 0

*The comments in the above article only represent the author's personal views and do not represent the views and positions of this website. If you have more insights, please feel free to contribute and share.

Share To

Database

Wechat

© 2024 shulou.com SLNews company. All rights reserved.

12
Report