- Type: Bug
- Resolution: Works as Designed
- Priority: Major - P3
- Affects Version/s: 4.4.0
- Component/s: None
- Backwards Compatibility: Fully Compatible
- Operating System: ALL
A 4.4.0 mongos fails to retry a failed retryable write on the new primary after the old primary is shut down. Instead of retrying on the new primary, mongos appears to block until the old primary is brought back online.
To reproduce:
- Start a cluster. Mine has 1 mongos and a 3-member PSA shard.
- Start a workload app that performs retryable writes for 10 seconds. In my repro there are 4 threads, each running client.test.test.insert_many([{}, {}]) (see the repro sketch at the end of this description).
- Shut down the shard primary with {shutdown: 1, force: 1} a few seconds into the workload.
- Observe that the workload app hangs unexpectedly.
- Verify that the shard has elected a new primary (a programmatic check follows the shell output):
PRIMARY> rs.status()
{
    "set" : "4bbb2116-552f-4213-b831-117bf4a4fcf4",
    "date" : ISODate("2020-08-31T20:11:24.867Z"),
    "myState" : 1,
    "term" : NumberLong(21),
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "majorityVoteCount" : 2,
    "writeMajorityCount" : 2,
    "votingMembersCount" : 3,
    "writableVotingMembersCount" : 2,
    "optimes" : {
        "lastCommittedOpTime" : { "ts" : Timestamp(1598903941, 196), "t" : NumberLong(20) },
        "lastCommittedWallTime" : ISODate("2020-08-31T19:59:01.956Z"),
        "readConcernMajorityOpTime" : { "ts" : Timestamp(1598903941, 196), "t" : NumberLong(20) },
        "readConcernMajorityWallTime" : ISODate("2020-08-31T19:59:01.956Z"),
        "appliedOpTime" : { "ts" : Timestamp(1598904684, 1), "t" : NumberLong(21) },
        "durableOpTime" : { "ts" : Timestamp(1598904684, 1), "t" : NumberLong(21) },
        "lastAppliedWallTime" : ISODate("2020-08-31T20:11:24.143Z"),
        "lastDurableWallTime" : ISODate("2020-08-31T20:11:24.143Z")
    },
    "lastStableRecoveryTimestamp" : Timestamp(1598903941, 196),
    "electionCandidateMetrics" : {
        "lastElectionReason" : "stepUpRequestSkipDryRun",
        "lastElectionDate" : ISODate("2020-08-31T19:59:02.021Z"),
        "electionTerm" : NumberLong(21),
        "lastCommittedOpTimeAtElection" : { "ts" : Timestamp(1598903941, 196), "t" : NumberLong(20) },
        "lastSeenOpTimeAtElection" : { "ts" : Timestamp(1598903941, 204), "t" : NumberLong(20) },
        "numVotesNeeded" : 2,
        "priorityAtElection" : 1,
        "electionTimeoutMillis" : NumberLong(10000),
        "priorPrimaryMemberId" : 0,
        "numCatchUpOps" : NumberLong(0),
        "newTermStartDate" : ISODate("2020-08-31T19:59:02.056Z")
    },
    "electionParticipantMetrics" : {
        "votedForCandidate" : true,
        "electionTerm" : NumberLong(20),
        "lastVoteDate" : ISODate("2020-08-31T19:58:41.241Z"),
        "electionCandidateMemberId" : 0,
        "voteReason" : "",
        "lastAppliedOpTimeAtElection" : { "ts" : Timestamp(1598903920, 1), "t" : NumberLong(19) },
        "maxAppliedOpTimeInSet" : { "ts" : Timestamp(1598903920, 1), "t" : NumberLong(19) },
        "priorityAtElection" : 1
    },
    "members" : [
        {
            "_id" : 0,
            "name" : "localhost:27019",
            "health" : 0,
            "state" : 8,
            "stateStr" : "(not reachable/healthy)",
            "uptime" : 0,
            "optime" : { "ts" : Timestamp(0, 0), "t" : NumberLong(-1) },
            "optimeDurable" : { "ts" : Timestamp(0, 0), "t" : NumberLong(-1) },
            "optimeDate" : ISODate("1970-01-01T00:00:00Z"),
            "optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
            "lastHeartbeat" : ISODate("2020-08-31T20:11:24.061Z"),
            "lastHeartbeatRecv" : ISODate("2020-08-31T19:59:02.556Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "configVersion" : 2,
            "configTerm" : 20
        },
        {
            "_id" : 1,
            "name" : "localhost:27020",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 779,
            "optime" : { "ts" : Timestamp(1598904684, 1), "t" : NumberLong(21) },
            "optimeDate" : ISODate("2020-08-31T20:11:24Z"),
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "electionTime" : Timestamp(1598903942, 1),
            "electionDate" : ISODate("2020-08-31T19:59:02Z"),
            "configVersion" : 2,
            "configTerm" : 21,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 2,
            "name" : "localhost:27021",
            "health" : 1,
            "state" : 7,
            "stateStr" : "ARBITER",
            "uptime" : 776,
            "lastHeartbeat" : ISODate("2020-08-31T20:11:23.193Z"),
            "lastHeartbeatRecv" : ISODate("2020-08-31T20:11:24.176Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "configVersion" : 2,
            "configTerm" : 21
        }
    ],
    "ok" : 1,
    "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000015") },
    "lastCommittedOpTime" : Timestamp(1598903941, 196),
    "$configServerState" : { "opTime" : { "ts" : Timestamp(1598904679, 1), "t" : NumberLong(1) } },
    "$clusterTime" : {
        "clusterTime" : Timestamp(1598904684, 1),
        "signature" : { "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId" : NumberLong(0) }
    },
    "operationTime" : Timestamp(1598904684, 1)
}
- The write operations are blocked in mongos. The mongos log shows the replica set monitor repeatedly failing to reach the old primary (a small filter sketch follows the excerpt):
tail -f /var/folders/t0/h63967912lv9gj5m78g80wk80000gp/T/mongos-QV5wbe/mongos.log
{"t":{"$date":"2020-08-31T13:00:18.937-07:00"},"s":"I", "c":"-", "id":4333222, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM received failed isMaster","attr":{"host":"localhost:27019","error":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","isMasterReply":"{}"}}
{"t":{"$date":"2020-08-31T13:00:18.937-07:00"},"s":"I", "c":"NETWORK", "id":4712102, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"Host failed in replica set","attr":{"replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","host":"localhost:27019","error":{"code":6,"codeName":"HostUnreachable","errmsg":"Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused"},"action":{"dropConnections":true,"requestImmediateCheck":true}}}
{"t":{"$date":"2020-08-31T13:00:18.937-07:00"},"s":"I", "c":"-", "id":4333227, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM monitoring host in expedited mode until we detect a primary","attr":{"host":"localhost:27021","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4"}}
{"t":{"$date":"2020-08-31T13:00:18.937-07:00"},"s":"I", "c":"-", "id":4333227, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM monitoring host in expedited mode until we detect a primary","attr":{"host":"localhost:27019","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4"}}
{"t":{"$date":"2020-08-31T13:00:18.937-07:00"},"s":"I", "c":"-", "id":4333218, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"Rescheduling the next replica set monitoring request","attr":{"replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","host":"localhost:27019","delayMillis":500}}
{"t":{"$date":"2020-08-31T13:00:18.937-07:00"},"s":"I", "c":"-", "id":4333227, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM monitoring host in expedited mode until we detect a primary","attr":{"host":"localhost:27020","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4"}}
{"t":{"$date":"2020-08-31T13:00:19.385-07:00"},"s":"I", "c":"CONNPOOL", "id":22576, "ctx":"TaskExecutorPool-0","msg":"Connecting","attr":{"hostAndPort":"localhost:27019"}}
{"t":{"$date":"2020-08-31T13:00:19.387-07:00"},"s":"I", "c":"CONNPOOL", "id":22572, "ctx":"TaskExecutorPool-0","msg":"Dropping all pooled connections","attr":{"hostAndPort":"localhost:27019","error":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused"}}
{"t":{"$date":"2020-08-31T13:00:19.441-07:00"},"s":"I", "c":"-", "id":4333222, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM received failed isMaster","attr":{"host":"localhost:27019","error":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","isMasterReply":"{}"}}
{"t":{"$date":"2020-08-31T13:00:19.441-07:00"},"s":"I", "c":"NETWORK", "id":4712102, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"Host failed in replica set","attr":{"replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","host":"localhost:27019","error":{"code":6,"codeName":"HostUnreachable","errmsg":"Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused"},"action":{"dropConnections":true,"requestImmediateCheck":false,"outcome":{"host":"localhost:27019","success":false,"errorMessage":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused"}}}}
{"t":{"$date":"2020-08-31T13:00:19.945-07:00"},"s":"I", "c":"-", "id":4333222, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM received failed isMaster","attr":{"host":"localhost:27019","error":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","isMasterReply":"{}"}}
{"t":{"$date":"2020-08-31T13:00:19.945-07:00"},"s":"I", "c":"NETWORK", "id":4712102, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"Host failed in replica set","attr":{"replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","host":"localhost:27019","error":{"code":6,"codeName":"HostUnreachable","errmsg":"Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused"},"action":{"dropConnections":true,"requestImmediateCheck":true}}}
{"t":{"$date":"2020-08-31T13:00:19.946-07:00"},"s":"I", "c":"-", "id":4333227, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM monitoring host in expedited mode until we detect a primary","attr":{"host":"localhost:27021","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4"}}
{"t":{"$date":"2020-08-31T13:00:19.946-07:00"},"s":"I", "c":"-", "id":4333227, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM monitoring host in expedited mode until we detect a primary","attr":{"host":"localhost:27019","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4"}}
{"t":{"$date":"2020-08-31T13:00:19.946-07:00"},"s":"I", "c":"-", "id":4333218, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"Rescheduling the next replica set monitoring request","attr":{"replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","host":"localhost:27019","delayMillis":499}}
{"t":{"$date":"2020-08-31T13:00:19.946-07:00"},"s":"I", "c":"-", "id":4333227, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM monitoring host in expedited mode until we detect a primary","attr":{"host":"localhost:27020","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4"}}
{"t":{"$date":"2020-08-31T13:00:19.998-07:00"},"s":"I", "c":"COMMAND", "id":51803, "ctx":"conn210","msg":"Slow query","attr":{"type":"command","ns":"admin.$cmd","command":{"ismaster":1,"topologyVersion":{"processId":{"$oid":"5f4d4df78fcf85824a2e2626"},"counter":0},"maxAwaitTimeMS":10000,"$clusterTime":{"clusterTime":{"$timestamp":{"t":1598903939,"i":2}},"signature":{"hash":{"$binary":{"base64":"AAAAAAAAAAAAAAAAAAAAAAAAAAA=","subType":"0"}},"keyId":0}},"$db":"admin","$readPreference":{"mode":"primary"}},"numYields":0,"reslen":436,"protocol":"op_msg","durationMillis":9999}}
{"t":{"$date":"2020-08-31T13:00:20.342-07:00"},"s":"I", "c":"SHARDING", "id":20997, "ctx":"Uptime-reporter","msg":"Refreshed RWC defaults","attr":{"newDefaults":{}}}
{"t":{"$date":"2020-08-31T13:00:20.385-07:00"},"s":"I", "c":"CONNPOOL", "id":22576, "ctx":"TaskExecutorPool-0","msg":"Connecting","attr":{"hostAndPort":"localhost:27019"}}
{"t":{"$date":"2020-08-31T13:00:20.387-07:00"},"s":"I", "c":"CONNPOOL", "id":22572, "ctx":"TaskExecutorPool-0","msg":"Dropping all pooled connections","attr":{"hostAndPort":"localhost:27019","error":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused"}}
{"t":{"$date":"2020-08-31T13:00:20.446-07:00"},"s":"I", "c":"-", "id":4333222, "ctx":"ReplicaSetMonitor-TaskExecutor","msg":"RSM received failed isMaster","attr":{"host":"localhost:27019","error":"HostUnreachable: Error connecting to localhost:27019 (::1:27019) :: caused by :: Connection refused","replicaSet":"4bbb2116-552f-4213-b831-117bf4a4fcf4","isMasterReply":"{}"}}
- Verify that the same mongos can still process new write operations:
MongoDB Enterprise mongos> db.test.insertOne({})
{ "acknowledged" : true, "insertedId" : ObjectId("5f4d5cbaefe4cecb41b89340") }
- After restarting the old primary mongod, the stalled operations on mongos succeed.
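For reference, a minimal sketch of the workload app described above, assuming PyMongo with a mongos listening on localhost:27017 (the port is an assumption; the {shutdown: 1, force: 1} step is issued separately against the shard primary via the shell):

    import threading
    import time
    from pymongo import MongoClient

    # mongos endpoint; localhost:27017 is an assumption, adjust to your cluster.
    # retryWrites=True is the driver default in recent PyMongo, passed explicitly here.
    client = MongoClient("localhost", 27017, retryWrites=True)

    def workload(stop_at):
        # Each insert_many is a retryable write; on a primary failover the
        # driver/mongos is expected to retry it once on the new primary.
        while time.time() < stop_at:
            client.test.test.insert_many([{}, {}])

    stop_at = time.time() + 10  # run the workload for 10 seconds
    threads = [threading.Thread(target=workload, args=(stop_at,)) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # with this bug, join blocks until the old primary is restarted
    print("workload finished")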