Brief introduction:
A replica set has three kinds of nodes:
1: standard node:
Holds data and takes part in the primary election.
2: passive node:
Holds data but can only ever be a secondary; it is never elected primary.
3: arbiter node:
Only votes in elections and stores no data.
Each role corresponds to a member option in the replica set configuration, as sketched below.
Experimental environment: 2 standard nodes, 1 passive node and 1 arbiter node.
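As a minimal sketch of how these roles translate into configuration (the full cfg object used in this lab appears further down), each member entry carries either a priority or the arbiterOnly flag:
{ "_id": 0, "host": "192.168.120.136:27017", "priority": 100 }     # standard node: electable
{ "_id": 2, "host": "192.168.120.136:27019", "priority": 0 }       # passive node: never primary
{ "_id": 3, "host": "192.168.120.136:27020", "arbiterOnly": true } # arbiter: votes only, no data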
Detailed steps (the first half repeats the setup from the previous experiment):
[root@localhost ~]# mkdir -p /data/mongodb/mongodb{2,3,4}
[root@localhost ~]# mkdir -p /data/mongodb/logs
[root@localhost ~]# touch /data/mongodb/logs/mongodb{2,3,4}.log
[root@localhost ~]# chmod 777 /data/mongodb/logs/*.log
Edit the configuration file:
[root@localhost logs]# vim /etc/mongod.conf
Enable replication and set a custom replica set name:
replication:
  replSetName: kgcrs
[root@localhost yum.repos.d]# mongod -f /etc/mongod.conf --shutdown    # stop the service
[root@localhost yum.repos.d]# mongod -f /etc/mongod.conf               # start the service
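As an optional sanity check (not part of the original steps), the restarted instance can be asked for its parsed configuration with the shell helper db.serverCmdLineOpts(); the replica set name set above should show up under parsed.replication, something like:
[root@localhost yum.repos.d]# mongo --port 27017 --quiet --eval 'printjson(db.serverCmdLineOpts().parsed.replication)'
{ "replSetName" : "kgcrs" }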
Copying the configuration produces four instances in total:
[root@localhost yum.repos.d]# cp -p /etc/mongod.conf /etc/mongod2.conf
[root@localhost yum.repos.d]# cp -p /etc/mongod.conf /etc/mongod3.conf
[root@localhost yum.repos.d]# cp -p /etc/mongod.conf /etc/mongod4.conf
[root@localhost yum.repos.d]# vim /etc/mongod2.conf
# three items to modify
path: /data/mongodb/logs/mongodb2.log
dbPath: /data/mongodb/mongodb2
port: 27018
# modify the other configuration files in the same way
[root@localhost yum.repos.d]# cp -p /etc/mongod2.conf /etc/mongod3.conf
[root@localhost yum.repos.d]# vim /etc/mongod3.conf
(change the same three items, just bump the number)
[root@localhost yum.repos.d]# cp -p /etc/mongod2.conf /etc/mongod4.conf
[root@localhost yum.repos.d]# vim /etc/mongod4.conf
# start the instances once the modifications are complete
[root@localhost yum.repos.d]# mongod -f /etc/mongod2.conf
[root@localhost yum.repos.d]# mongod -f /etc/mongod3.conf
[root@localhost yum.repos.d]# mongod -f /etc/mongod4.conf
# check the listening ports
[root@localhost yum.repos.d]# netstat -ntap
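To pick the mongod listeners out of that output (a small convenience, not in the original), grep for the process name; the four ports configured above should all be in LISTEN state:
[root@localhost yum.repos.d]# netstat -ntap | grep mongod
# expect LISTEN entries on 27017, 27018, 27019 and 27020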
# log in to MongoDB
# define the replica set configuration
cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.120.136:27017","priority":100},{"_id":1,"host":"192.168.120.136:27018","priority":100},{"_id":2,"host":"192.168.120.136:27019","priority":0},{"_id":3,"host":"192.168.120.136:27020","arbiterOnly":true}]}
{
    "_id" : "kgcrs",
    "members" : [
        {
            "_id" : 0,
            "host" : "192.168.120.136:27017",
            "priority" : 100
        },
        {
            "_id" : 1,
            "host" : "192.168.120.136:27018",
            "priority" : 100
        },
        {
            "_id" : 2,
            "host" : "192.168.120.136:27019",
            "priority" : 0
        },
        {
            "_id" : 3,
            "host" : "192.168.120.136:27020",
            "arbiterOnly" : true
        }
    ]
}
# initialize the replica set
rs.initiate(cfg)
{
    "ok" : 1,
    "operationTime" : Timestamp(1536821356, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1536821356, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
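At this point rs.status() (not shown in the original) gives a quick overview of every member; once the election settles, the stateStr fields should read PRIMARY, SECONDARY, SECONDARY and ARBITER, for example:
kgcrs:PRIMARY> rs.status().members.forEach(function (m) { print(m.name + " -> " + m.stateStr) })
192.168.120.136:27017 -> PRIMARY
192.168.120.136:27018 -> SECONDARY
192.168.120.136:27019 -> SECONDARY
192.168.120.136:27020 -> ARBITER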
# check this node's identity
kgcrs:PRIMARY> rs.isMaster()
{
    "hosts" : [
        "192.168.120.136:27017",
        "192.168.120.136:27018"
    ],
    "passives" : [
        "192.168.120.136:27019"
    ],
    "arbiters" : [
        "192.168.120.136:27020"
    ],
    "setName" : "kgcrs",
    "setVersion" : 1,
    "ismaster" : true,
    "secondary" : false,
    "primary" : "192.168.120.136:27017",
    "me" : "192.168.120.136:27017",
    "electionId" : ObjectId("7fffffff0000000000000001"),
    "lastWrite" : {
        "opTime" : {
            "ts" : Timestamp(1536821508, 1),
            "t" : NumberLong(1)
        },
        "lastWriteDate" : ISODate("2018-09-13T06:51:48Z"),
        "majorityOpTime" : {
            "ts" : Timestamp(1536821508, 1),
            "t" : NumberLong(1)
        },
        "majorityWriteDate" : ISODate("2018-09-13T06:51:48Z")
    },
    "maxBsonObjectSize" : 16777216,
    "maxMessageSizeBytes" : 48000000,
    "maxWriteBatchSize" : 100000,
    "localTime" : ISODate("2018-09-13T06:51:54.761Z"),
    "logicalSessionTimeoutMinutes" : 30,
    "minWireVersion" : 0,
    "maxWireVersion" : 6,
    "readOnly" : false,
    "ok" : 1,
    "operationTime" : Timestamp(1536821508, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1536821508, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
---- oplog operations ----
# create the kgc database and switch to it
kgcrs:PRIMARY> use kgc
switched to db kgc
# create a t1 collection in kgc and insert two documents
kgcrs:PRIMARY> db.t1.insert({"id":1,"name":"jack"})
WriteResult({ "nInserted" : 1 })
kgcrs:PRIMARY> db.t1.insert({"id":2,"name":"tom"})
WriteResult({ "nInserted" : 1 })
# view the data
kgcrs:PRIMARY> db.t1.find()
{ "_id" : ObjectId("5b9a0d9efa7413d9cb629b39"), "id" : 1, "name" : "jack" }
{ "_id" : ObjectId("5b9a0dacfa7413d9cb629b3a"), "id" : 2, "name" : "tom" }
# with the database operations done, move on to the oplog
# switch to the local database
kgcrs:PRIMARY> use local
switched to db local
# view the collections
kgcrs:PRIMARY> show collections
me
oplog.rs
replset.election
replset.minvalid
startup_log
system.replset
system.rollback.id
# view the oplog
kgcrs:PRIMARY> db.oplog.rs.find()
(every operation is recorded in this log)
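Because every write is logged, the two inserts made above can be picked out of the oplog; a minimal sketch (in oplog entries, op "i" means insert and ns is the namespace):
kgcrs:PRIMARY> db.oplog.rs.find({ "op": "i", "ns": "kgc.t1" }).pretty()
# one entry per inserted document, with the original document in the "o" field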
# The primary accepts reads and writes, while a secondary only replicates data and refuses reads by default, so its databases cannot be viewed directly; a command is needed to allow reads on it.
# logging in to a secondary node and trying to list the databases fails:
kgcrs:SECONDARY> show dbs
2018-09-13T15:28:51.529+0800 E QUERY    [thread1] Error: listDatabases failed: {
    "operationTime" : Timestamp(1536823728, 1),
    "ok" : 0,
    "errmsg" : "not master and slaveOk=false",
    "code" : 13435,
    "codeName" : "NotMasterNoSlaveOk",
    "$clusterTime" : {
        "clusterTime" : Timestamp(1536823728, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
} :
_getErrorWithCode@src/mongo/shell/utils.js:25:13
shellHelper.show@src/mongo/shell/utils.js:849:19
@(shellhelp2):1:1
kgcrs:SECONDARY> rs.slaveOk()
# now it works
kgcrs:SECONDARY> show dbs
admin   0.000GB
config  0.000GB
kgc     0.000GB
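With rs.slaveOk() in effect, the data replicated from the primary can also be read on this secondary, for example:
kgcrs:SECONDARY> use kgc
switched to db kgc
kgcrs:SECONDARY> db.t1.find()
{ "_id" : ObjectId("5b9a0d9efa7413d9cb629b39"), "id" : 1, "name" : "jack" }
{ "_id" : ObjectId("5b9a0dacfa7413d9cb629b3a"), "id" : 2, "name" : "tom" }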
# if you forget a command, check the built-in help
kgcrs:SECONDARY> rs.help()
# view the oplog size and time range
kgcrs:SECONDARY> rs.printReplicationInfo()
configured oplog size:   990MB
log length start to end: 2812secs (0.78hrs)
oplog first event time:  Thu Sep 13 2018 14:49:16 GMT+0800 (CST)
oplog last event time:   Thu Sep 13 2018 15:36:08 GMT+0800 (CST)
now:                     Thu Sep 13 2018 15:36:12 GMT+0800 (CST)
# view each node's replication lag
kgcrs:SECONDARY> rs.printSlaveReplicationInfo()
source: 192.168.120.136:27018
    syncedTo: Thu Sep 13 2018 15:40:28 GMT+0800 (CST)
    0 secs (0 hrs) behind the primary
source: 192.168.120.136:27019
    syncedTo: Thu Sep 13 2018 15:40:28 GMT+0800 (CST)
    0 secs (0 hrs) behind the primary
# resize the oplog (an offline procedure)
# switch to admin first
kgcrs:SECONDARY> use admin
switched to db admin
# shut down the service
kgcrs:SECONDARY> db.shutdownServer()
server should be down...
# comment out the replica set startup parameters and change the port to 27028
# edit the configuration file
[root@localhost ~]# vim /etc/mongod2.conf
#replication:
#  replSetName: kgcrs      # comment out these two lines first (take the node out of the replica set)
# change port 27018 to 27028
# restart the service
[root@localhost ~]# mongod -f /etc/mongod2.conf
# back up all of the current node's oplog records
[root@localhost ~]# mongodump --port 27028 --db local --collection 'oplog.rs'
2018-09-13T16:01:34.259+0800    writing local.oplog.rs to
2018-09-13T16:01:34.280+0800    done dumping local.oplog.rs (347 documents)
[root@localhost ~]# mongo --port 27028
use local
switched to db local
# drop the old oplog
db.oplog.rs.drop()
true
# create a new capped collection and set its size (2048MB)
db.runCommand({ create: "oplog.rs", capped: true, size: (2 * 1024 * 1024 * 1024) })
{ "ok" : 1 }
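A quick way to confirm the collection really was recreated as a 2048MB capped collection (not part of the original steps) is to check its stats; maxSize is reported in bytes, so the expected values are:
db.oplog.rs.stats().capped
true
db.oplog.rs.stats().maxSize
2147483648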
# shut down the service again
use admin
switched to db admin
db.shutdownServer()
# edit the configuration file again
[root@localhost ~]# vim /etc/mongod2.conf
# uncomment the replication settings and add the oplog size
replication:
  replSetName: kgcrs
  oplogSizeMB: 2048
# change the port back to 27018
# start the service
[root@localhost ~]# mongod -f /etc/mongod2.conf
# connect to port 27018
# check the new size
kgcrs:SECONDARY> rs.printReplicationInfo()
configured oplog size:   2048MB
log length start to end: 100secs (0.03hrs)
oplog first event time:  Thu Sep 13 2018 16:10:29 GMT+0800 (CST)
oplog last event time:   Thu Sep 13 2018 16:12:09 GMT+0800 (CST)
now:                     Thu Sep 13 2018 16:12:16 GMT+0800 (CST)
---- deploying replica set authentication ----
# connect to the primary node
kgcrs:PRIMARY> use admin
switched to db admin
# create a user and set a password
kgcrs:PRIMARY> db.createUser({"user":"root","pwd":"123","roles":["root"]})
Successfully added user: { "user" : "root", "roles" : [ "root" ] }
# edit the configuration file
[root@localhost ~]# vim /etc/mongod.conf
# enable security by adding these two lines
security:
  keyFile: /usr/bin/kgcrskey1
  clusterAuthMode: keyFile
# make the corresponding change in the other configuration files
[root@localhost ~]# vim /etc/mongod2.conf
security:
  keyFile: /usr/bin/kgcrskey2
  clusterAuthMode: keyFile
[root@localhost ~]# vim /etc/mongod3.conf
security:
  keyFile: /usr/bin/kgcrskey3
  clusterAuthMode: keyFile
[root@localhost ~]# vim /etc/mongod4.conf
security:
  keyFile: /usr/bin/kgcrskey4
  clusterAuthMode: keyFile
# write the key files
[root@localhost ~]# cd /usr/bin/
[root@localhost bin]# echo "kgcrskey" > kgcrskey1
[root@localhost bin]# echo "kgcrskey" > kgcrskey2
[root@localhost bin]# echo "kgcrskey" > kgcrskey3
[root@localhost bin]# echo "kgcrskey" > kgcrskey4
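The literal string "kgcrskey" is fine for a lab, but a random key is the more common choice in practice (an alternative, not used in the original); every node must share the same key content, so generate it once and copy it:
[root@localhost bin]# openssl rand -base64 756 > kgcrskey1
[root@localhost bin]# cp kgcrskey1 kgcrskey2
[root@localhost bin]# cp kgcrskey1 kgcrskey3
[root@localhost bin]# cp kgcrskey1 kgcrskey4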
[root@localhost bin]# mongod -f /etc/mongod.conf --shutdown
killing process with pid: 50970
[root@localhost bin]# chmod 600 kgc*
[root@localhost bin]# mongod -f /etc/mongod.conf
# restart all four instances in turn so the key file settings take effect
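A minimal sketch of that restart, reusing the stop/start commands from earlier for the remaining three instances:
[root@localhost bin]# mongod -f /etc/mongod2.conf --shutdown && mongod -f /etc/mongod2.conf
[root@localhost bin]# mongod -f /etc/mongod3.conf --shutdown && mongod -f /etc/mongod3.conf
[root@localhost bin]# mongod -f /etc/mongod4.conf --shutdown && mongod -f /etc/mongod4.conf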
# connect to the primary
kgcrs:PRIMARY> show dbs          # the databases can no longer be listed
kgcrs:PRIMARY> rs.status()       # the replica set status can no longer be viewed
kgcrs:PRIMARY> use admin         # authenticate first
kgcrs:PRIMARY> db.auth("root","123")
# after a successful login everything can be viewed again
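For example, once db.auth() returns 1, the commands that failed before work again:
kgcrs:PRIMARY> show dbs          # now lists admin, config, kgc and local
kgcrs:PRIMARY> rs.status()       # now returns the full replica set status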