  Core Server / SERVER-18912

Fatal assertion (duplicate key error) when replicating insert on capped collection

    • Type: Bug
    • Resolution: Done
    • Priority: Major - P3
    • Fix Version/s: 3.1.6
    • Affects Version/s: None
    • Component/s: Storage
    • Backwards Compatibility: Fully Compatible
    • Operating System: ALL

      Run with `python buildscripts/smoke.py repro.js`

      'use strict';
      
      load('jstests/libs/parallelTester.js');
      
      function cappedWorkload() {
          // Each thread repeatedly (re)creates a small capped collection with a
          // secondary index and inserts documents with fixed _ids, occasionally
          // dropping the collection so create/drop races against the inserts.
          var conn = new Mongo();
          var db = conn.getDB('test');
      
          while (true) {
              try {
                  if (Random.randInt(100) === 0) {
                      db.foo.drop();
                  }
                  db.createCollection('foo', {capped: true, size: 4096});
      
                  db.foo.ensureIndex({value: 1});
                  for (var i = 0; i < 10; ++i) {
                      db.foo.insert({_id: i, value: 0});
                  }
              } catch (e) {
                  // ignore
              }
          }
      }
      
      // start the replica set
      var rs = new ReplSetTest({nodes: 3, startPort: 27017});
      rs.startSet();
      rs.initiate();
      rs.awaitSecondaryNodes();
      
      // start the workload threads
      var threads = [];
      for (var i = 0; i < 20; i++) {
          var t = new ScopedThread(cappedWorkload);
      
          threads.push(t);
          t.start();
      }
      
      threads.forEach(function(t) {
          t.join();
      });
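
      If running outside the smoke harness, a hedged alternative is to launch the
      script directly from the source tree root with `mongo --nodb repro.js`; the
      script starts its own ReplSetTest, so --nodb simply keeps the shell from
      connecting to a server on startup, and the relative load() path for
      parallelTester.js then resolves correctly.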
      
    • Sprint: Quint Iteration 5, Quint Iteration 6

      Versions affected: recent versions of master (i.e., versions after the bump for 3.1.4)

      Preliminary bisecting suggests this bug was introduced after the version bump for 3.1.4, and it appears to affect only the WiredTiger storage engine:

       m27019| 2015-06-10T15:06:40.940-0400 I INDEX    [repl writer worker 15] build index on: test.foo properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "test.foo" }
       m27019| 2015-06-10T15:06:40.940-0400 I INDEX    [repl writer worker 15] 	 building index using bulk method
       m27019| 2015-06-10T15:06:40.946-0400 I INDEX    [repl writer worker 15] build index done.  scanned 0 total records. 0 secs
       m27019| 2015-06-10T15:06:40.996-0400 E REPL     [repl writer worker 10] writer worker caught exception:  :: caused by :: 11000 E11000 duplicate key error collection: test.foo index: _id_ dup key: { : 8.0 } on: { ts: Timestamp 1433963200000|13, t: 0, h: -3679832681211329472, v: 2, op: "i", ns: "test.foo", o: { _id: 8.0, value: 0.0 } }
       m27019| 2015-06-10T15:06:40.996-0400 I -        [repl writer worker 10] Fatal Assertion 16360
       m27019| 2015-06-10T15:06:40.996-0400 I -        [repl writer worker 10] 
       m27019| 
       m27019| ***aborting after fassert() failure
      
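      To look at the same operations from the other side, here is a hedged sketch
      (not part of the original report) of querying the primary's oplog for the
      insert ops against test.foo that a secondary would be replaying when it hits
      the duplicate key error; it assumes a shell connected to the current primary:

      // Illustrative sketch: list recent insert ops against test.foo, newest first.
      // Each document has the same shape as the op quoted in the log above
      // (op: "i", ns: "test.foo", o: { _id: ..., value: ... }).
      var oplog = db.getSiblingDB('local').oplog.rs;
      oplog.find({ns: 'test.foo', op: 'i'}).sort({$natural: -1}).limit(10).forEach(printjson);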


            Assignee: Mathias Stearn (mathias@mongodb.com)
            Reporter: Kamran K. (kamran.khan)
            Votes: 0
            Watchers: 9

              Created:
              Updated:
              Resolved: