In addition to Weibo, there is also WeChat
Please pay attention
WeChat public account
Shulou
2025-01-18 Update From: SLTechnology News&Howtos shulou NAV: SLTechnology News&Howtos > Database >
Share
Shulou(Shulou.com)06/01 Report--
1. Install mongo on the new member machine and start shard1
[root@ip-10-1-2-183 etc]# more shard1.conf
logpath=/usr/local/mongodb/logs/mongo_shard1.log
logappend=true # needs logrotate scripts
fork=true
journal=true
port=27019
# vvvvv = true
# diaglog = 3
dbpath=/usr/local/mongodb/shard1
pidfilepath=/usr/local/mongodb/logs/mongo_shard1.pid
bind_ip=10.1.2.183
replSet=shard1
shardsvr=true
/usr/local/mongodb/bin/mongod -f shard1.conf
2. Add a new replica set member on the primary node
[root@ip-10-1-2-32 etc]# /usr/local/mongodb/bin/mongo 10.1.2.32:27019
shard1:PRIMARY > rs.add({_id: 3, host: "10.1.2.183:27019"})
{
"ok": 1
OperationTime: Timestamp (1525752728, 3)
"$gleStats": {
"lastOpTime": {
Ts: Timestamp (1525752728, 3)
"t": NumberLong (1)
}
"electionId": ObjectId ("7fffffff0000000000000001")
}
"$clusterTime": {
ClusterTime: Timestamp (1525752728, 3)
"signature": {
"hash": BinData (0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA=")
"keyId": NumberLong (0)
}
}
"$configServerState": {
"opTime": {
Ts: Timestamp (1525752728, 2)
"t": NumberLong (1)
}
}
}
Shard1:PRIMARY > rs.isMaster ()
{
"hosts": [
"10.1.2.32:27019"
"10.1.2.68:27019"
"10.1.2.175:27019"
"10.1.2.183:27019"
]
"setName": "shard1"
"setVersion": 4
"ismaster": true
"secondary": false
"primary": "10.1.2.32:27019"
"me": "10.1.2.32:27019"
"electionId": ObjectId ("7fffffff0000000000000001")
"lastWrite": {
"opTime": {
Ts: Timestamp (1525752728, 3)
"t": NumberLong (1)
}
LastWriteDate: ISODate ("2018-05-08T04:12:08Z")
"majorityOpTime": {
Ts: Timestamp (1525752728, 3)
"t": NumberLong (1)
}
MajorityWriteDate: ISODate ("2018-05-08T04:12:08Z")
}
"maxBsonObjectSize": 16777216
"maxMessageSizeBytes": 48000000
"maxWriteBatchSize": 100000
LocalTime: ISODate ("2018-05-08T04:12:12.282Z")
"logicalSessionTimeoutMinutes": 30
"minWireVersion": 0
"maxWireVersion": 6
"readOnly": false
"ok": 1
OperationTime: Timestamp (1525752728, 3)
"$gleStats": {
"lastOpTime": {
Ts: Timestamp (1525752728, 3)
"t": NumberLong (1)
}
"electionId": ObjectId ("7fffffff0000000000000001")
}
"$clusterTime": {
ClusterTime: Timestamp (1525752729, 3)
"signature": {
"hash": BinData (0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA=")
"keyId": NumberLong (0)
}
}
"$configServerState": {
"opTime": {
Ts: Timestamp (1525752729, 3)
"t": NumberLong (1)
}
}
}
Shard1:PRIMARY > rs.conf ()
{
"_ id": "shard1"
"version": 4
"protocolVersion": NumberLong (1)
"members": [
{
"_ id": 0
"host": "10.1.2.32:27019"
"arbiterOnly": false
"buildIndexes": true
"hidden": false
"priority": 1
"tags": {
}
"slaveDelay": NumberLong (0)
"votes": 1
}
{
"_ id": 1
"host": "10.1.2.68:27019"
"arbiterOnly": false
"buildIndexes": true
"hidden": false
"priority": 1
"tags": {
}
"slaveDelay": NumberLong (0)
"votes": 1
}
{
"_ id": 2
"host": "10.1.2.175:27019"
"arbiterOnly": false
"buildIndexes": true
"hidden": false
"priority": 1
"tags": {
}
"slaveDelay": NumberLong (0)
"votes": 1
}
{
"_ id": 3
"host": "10.1.2.183:27019"
"arbiterOnly": false
"buildIndexes": true
"hidden": false
"priority": 1
"tags": {
}
"slaveDelay": NumberLong (0)
"votes": 1
}
]
"settings": {
"chainingAllowed": true
"heartbeatIntervalMillis": 2000
"heartbeatTimeoutSecs": 10
"electionTimeoutMillis": 10000
"catchUpTimeoutMillis":-1
"catchUpTakeoverDelayMillis": 30000
"getLastErrorModes": {
}
"getLastErrorDefaults": {
"w": 1
"wtimeout": 0
}
"replicaSetId": ObjectId ("5aeab9557a9235efbe4a9b59")
} shard1:PRIMARY > rs.status ()
{
"set": "shard1"
Date: ISODate ("2018-05-08T05:38:35.245Z")
"myState": 1
"term": NumberLong (1)
"heartbeatIntervalMillis": NumberLong (2000)
"optimes": {
"lastCommittedOpTime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
"readConcernMajorityOpTime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
"appliedOpTime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
"durableOpTime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
}
"members": [
{
"_ id": 0
"name": "10.1.2.32:27019"
"health": 1
"state": 1
"stateStr": "PRIMARY"
"uptime": 425793
"optime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
OptimeDate: ISODate ("2018-05-08T05:38:27Z")
ElectionTime: Timestamp (1525332319, 1)
ElectionDate: ISODate ("2018-05-03T07:25:19Z")
"configVersion": 4
"self": true
}
{
"_ id": 1
"name": "10.1.2.68:27019"
"health": 1
"state": 2
"stateStr": "SECONDARY"
"uptime": 425606
"optime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
"optimeDurable": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
OptimeDate: ISODate ("2018-05-08T05:38:27Z")
OptimeDurableDate: ISODate ("2018-05-08T05:38:27Z")
LastHeartbeat: ISODate ("2018-05-08T05:38:34.014Z")
LastHeartbeatRecv: ISODate ("2018-05-08T05:38:34.014Z")
"pingMs": NumberLong (0)
"syncingTo": "10.1.2.32:27019"
"configVersion": 4
}
{
"_ id": 2
"name": "10.1.2.175:27019"
"health": 1
"state": 2
"stateStr": "SECONDARY"
"uptime": 425606
"optime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
"optimeDurable": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
OptimeDate: ISODate ("2018-05-08T05:38:27Z")
OptimeDurableDate: ISODate ("2018-05-08T05:38:27Z")
LastHeartbeat: ISODate ("2018-05-08T05:38:34.014Z")
LastHeartbeatRecv: ISODate ("2018-05-08T05:38:34.016Z")
"pingMs": NumberLong (0)
"syncingTo": "10.1.2.183:27019"
"configVersion": 4
}
{
"_ id": 3
"name": "10.1.2.183:27019"
"health": 1
"state": 2
"stateStr": "SECONDARY"
"uptime": 5184
"optime": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
"optimeDurable": {
Ts: Timestamp (1525757907, 1)
"t": NumberLong (1)
}
OptimeDate: ISODate ("2018-05-08T05:38:27Z")
OptimeDurableDate: ISODate ("2018-05-08T05:38:27Z")
LastHeartbeat: ISODate ("2018-05-08T05:38:34.016Z")
LastHeartbeatRecv: ISODate ("2018-05-08T05:38:33.567Z")
"pingMs": NumberLong (0)
"syncingTo": "10.1.2.68:27019"
"configVersion": 4
}
]
"ok": 1
OperationTime: Timestamp (1525757907, 1)
"$gleStats": {
"lastOpTime": Timestamp (0,0)
"electionId": ObjectId ("7fffffff0000000000000001")
}
"$clusterTime": {
ClusterTime: Timestamp (1525757911, 2)
"signature": {
"hash": BinData (0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA=")
"keyId": NumberLong (0)
}
}
"$configServerState": {
"opTime": {
Ts: Timestamp (1525757911, 2)
"t": NumberLong (1)
}
}
}
You can see that the new members have joined the replication set.
3.shard2 config plus members are the same (process is brief)
4. Check whether you have joined successfully.
Log in to the mongos port to check: [root@ip-10-1-2-32 etc]# /usr/local/mongodb/bin/mongo 10.1.2.32:30000
MongoDB shell version v3.6.4
Connecting to: mongodb://10.1.2.32:30000/test
MongoDB server version: 3.6.4
Server has startup warnings:
2018-05-03T07:47:09.379+0000 I CONTROL [main]
2018-05-03T07:47:09.379+0000 I CONTROL [main] * * WARNING: Access control is not enabled for the database.
2018-05-03T07:47:09.379+0000 I CONTROL [main] * * Read and write access to data and configuration is unrestricted.
2018-05-03T07:47:09.379+0000 I CONTROL [main] * * WARNING: You are running this process as the root user, which is not recommended.
2018-05-03T07:47:09.379+0000 I CONTROL [main]
Mongos > db.printShardingStatus ()
-Sharding Status
Sharding version: {
"_ id": 1
"minCompatibleVersion": 5
"currentVersion": 6
"clusterId": ObjectId ("5aeabc4db192a4fefca1c888")
}
Shards:
{"_ id": "shard1", "host": "shard1/10.1.2.175:27019,10.1.2.183:27019,10.1.2.32:27019,10.1.2.68:27019", "state": 1}-the new replica set members are automatically synchronized to the sharding information
{"_ id": "shard2", "host": "shard2/10.1.2.175:27018,10.1.2.32:27018,10.1.2.68:27018", "state": 1}
{"_ id": "shard3", "host": "shard3/10.1.2.175:27017,10.1.2.32:27017,10.1.2.68:27017", "state": 1}
Active mongoses:
"3.6.4": 3
Autosplit:
Currently enabled: yes
Balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
Databases:
{"_ id": "config", "primary": "config", "partitioned": true}
Config.system.sessions
Shard key: {"_ id": 1}
Unique: false
Balancing: true
Chunks:
Shard1 1
{"_ id": {"$minKey": 1}}-- > > {"_ id": {"$maxKey": 1}} on: shard1 Timestamp (1,0)
{"_ id": "test", "primary": "shard2", "partitioned": true}
Test.users
Shard key: {"_ id": 1}
Unique: false
Balancing: true
Chunks:
Shard2 1
{"_ id": {"$minKey": 1}}-- > > {"_ id": {"$maxKey": 1}} on: shard2 Timestamp (1,0)
Mongos >
5. Finally, modify the configuration file.
To avoid problems on restart, write the new member into the configuration file. [root@ip-10-1-2-32 etc]# more mongos.conf
logpath=/usr/local/mongodb/logs/mongos.log
logappend=true # needs logrotate scripts
fork=true
port=30000
pidfilepath=/usr/local/mongodb/logs/mongos.pid
configdb=conf/10.1.2.32:27000,10.1.2.68:27000,10.1.2.175:27000,10.1.2.183:27000
bind_ip=10.1.2.32
Welcome to subscribe to "Shulou Technology Information" to get the latest news, interesting stories and hot topics in the IT industry, and to stay on top of the hottest and latest Internet news, technology news and IT industry trends.
Views: 0
*The comments in the above article only represent the author's personal views and do not represent the views and positions of this website. If you have more insights, please feel free to contribute and share.
Continue with the installation of the previous hadoop.First, install zookooper1. Decompress zookoope
"Every 5-10 years, there's a rare product, a really special, very unusual product that's the most un
© 2024 shulou.com SLNews company. All rights reserved.