Type: Bug
Resolution: Unresolved
Priority: Major - P3
Fix Version/s: None
Affects Version/s: None
Component/s: Replication
Operating System: ALL
The update code enforces several size constraints when modifiers are applied, including the following (the first two are sketched in code after this list):
- The document size cannot exceed the user doc size limit
- The array backfill amount cannot exceed the array backfill limit
- Other constraints?
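The first two checks can be modeled in isolation. Below is a minimal C++ sketch under two assumptions: a 16MB user document limit (BSONObjMaxUserSize) and a 1,500,000-element array backfill cap, consistent with the 1599999 index used in the test below. kMaxUserDocSize, kMaxArrayBackfill, checkDocSize, and checkArrayBackfill are illustrative names, not the server's actual symbols.

// A minimal sketch of the first two checks, under two assumptions: a 16MB
// user document limit (BSONObjMaxUserSize) and a 1,500,000-element array
// backfill cap. All names below are illustrative, not the server's symbols.
#include <cstddef>
#include <stdexcept>

namespace sketch {

const std::size_t kMaxUserDocSize = 16 * 1024 * 1024;  // assumed 16MB limit
const std::size_t kMaxArrayBackfill = 1500000;         // assumed backfill cap

// Reject an update whose resulting document would exceed the size limit.
// The check runs against the document the modifier is applied to *now*,
// which is the root of the idempotency problem described here.
inline void checkDocSize(std::size_t resultingDocSize) {
    if (resultingDocSize > kMaxUserDocSize)
        throw std::runtime_error("resulting document exceeds max user size");
}

// Reject a $set on array index 'targetIndex' that would pad the array with
// kMaxArrayBackfill or more new elements; a no-op if the index already exists.
inline void checkArrayBackfill(std::size_t currentLength, std::size_t targetIndex) {
    if (targetIndex >= currentLength && targetIndex - currentLength >= kMaxArrayBackfill)
        throw std::runtime_error("array backfill limit exceeded");
}

}  // namespace sketch

The key point is that both checks run against the document the modifier is being applied to at that moment, not against the document the operation originally saw on the primary.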
One of these constraints may be violated when an update is applied to a future version of a document present on a secondary, even though the constraint was not violated when the operation was performed on the primary. If this occurs, data from another field may not be properly replicated to the secondary, and if it occurs during the initial sync of a replica set, the initial sync may fail.
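To make the document size case concrete, here is a hedged walk-through reusing sketch::checkDocSize from the block above; it mirrors the op sequence in the DocSizeLimitExceededOnReplay test below.

// Walk-through of the doc-size case, reusing sketch::checkDocSize from the
// block above. Sizes are approximate: only the two 10MB strings are counted.
#include <cassert>
#include <cstddef>
#include <stdexcept>

int main() {
    const std::size_t tenMB = 10 * 1024 * 1024;

    // Primary applies, in order (mirroring the test below):
    //   op1: {$set:{z:2}}                   tiny, always passes
    //   op2: {$set:{a:<10MB string>, z:3}}  doc now holds ~10MB: passes
    sketch::checkDocSize(tenMB);
    //   op3: {$unset:{a:1}}                 doc shrinks again
    //   op4: {$set:{b:<10MB string>}}       doc holds ~10MB: passes
    sketch::checkDocSize(tenMB);

    // An initial-syncing secondary clones the *final* document, which already
    // contains b, then replays op2 against it: ~20MB > 16MB, so the same
    // operation that succeeded on the primary now fails.
    bool replayFailed = false;
    try {
        sketch::checkDocSize(tenMB + tenMB);
    } catch (const std::runtime_error&) {
        replayFailed = true;
    }
    assert(replayFailed);
    return 0;
}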
Tests:
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 326dffb..0bfafc6 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -1332,6 +1332,63 @@ public:
     }
 };
 
+
+class DocSizeLimitExceededOnReplay : public Base {
+public:
+    DocSizeLimitExceededOnReplay() : _bigString(10 * 1024 * 1024, 'c') {}
+    void doIt() const {
+        _client.update(ns(), BSONObj(), fromjson("{$set:{z:2}}"));
+        // This update will work when applied initially, but when applied to the new
+        // document with 'b' set, the total doc size will exceed the max user size.
+        _client.update(ns(), BSONObj(), BSON("$set" << BSON("a" << _bigString << "z" << 3)));
+        _client.update(ns(), BSONObj(), fromjson("{$unset:{a:1}}"));
+        _client.update(ns(), BSONObj(), BSON("$set" << BSON("b" << _bigString)));
+    }
+    using ReplTests::Base::check;
+    void check() const {
+        ASSERT_EQUALS(1, count());
+        ASSERT_EQUALS(3, one(BSON("_id" << 0))["z"].number());
+    }
+    void reset() const {
+        deleteAll(ns());
+        insert(BSON("_id" << 0 << "z" << 1));
+    }
+
+private:
+    string _bigString;
+};
+
+
+class ArrayBackfillLimitExceededOnReplay : public Base {
+public:
+    void doIt() const {
+        _client.update(ns(), BSONObj(), fromjson("{$set:{z:1}}"));
+        // Modify an array field with a high index value. Will work when applied to the
+        // initial doc, but not when applied to the future doc where a is [], due to the
+        // array backfill limit implementation.
+        _client.update(ns(), BSONObj(), fromjson("{$set:{'a.1599999':false,z:2}}"));
+        _client.update(ns(), BSONObj(), fromjson("{$set:{a:[]}}"));
+    }
+
+    using ReplTests::Base::check;
+
+    void check() const {
+        ASSERT_EQUALS(1, count());
+        ASSERT_EQUALS(2, one(BSON("_id" << 0))["z"].number());
+    }
+
+    void reset() const {
+        deleteAll(ns());
+        // Insert a doc with a large array in the 'a' field.
+        BSONArrayBuilder bab;
+        for (int32_t i = 0; i < 1600000; ++i) {
+            bab << true;
+        }
+        insert(BSON("_id" << 0 << "a" << bab.arr()));
+    }
+};
+
+
 } // namespace Idempotence
 
 class DeleteOpIsIdBased : public Base {
@@ -1448,6 +1505,7 @@ public:
     }
 };
 
+
 class All : public Suite {
 public:
     All() : Suite("repl") {}
@@ -1508,6 +1566,8 @@ public:
         add<Idempotence::AddToSetEmptyMissing>();
         add<Idempotence::ReplaySetPreexistingNoOpPull>();
         add<Idempotence::ReplayArrayFieldNotAppended>();
+        add<Idempotence::DocSizeLimitExceededOnReplay>();
+        add<Idempotence::ArrayBackfillLimitExceededOnReplay>();
         add<DeleteOpIsIdBased>();
         add<DatabaseIgnorerBasic>();
         add<DatabaseIgnorerUpdate>();
Issue Links:
- is depended on by: SERVER-33946 Decrease number of initial sync attempts in tests to 1 (Blocked)
- is duplicated by: SERVER-8505 Document size limit can cause idempotency issues (Closed)
- is duplicated by: SERVER-28709 Initial Sync from MMAP to WT fails if large document create by oplog (Closed)
- is duplicated by: SERVER-60160 Initial syncing node can crash due to BSONObjectTooLarge exception thrown while replaying the oplog entries. (Closed)
- is related to: SERVER-4781 replica set initial sync failure when update cannot be applied to a future version of an object received via clone (Closed)
- related to: SERVER-6399 Refactor update() code (Closed)