-
Type: Bug
-
Resolution: Duplicate
-
Priority: Major - P3
-
None
-
Affects Version/s: None
-
Component/s: None
-
None
-
Query Optimization
-
ALL
This bug has different severity depending on the version. The query is
db.coll.count({predicate})
with secondary reads.
On 4.4, the result is incorrect, and includes orphan documents. A SHARDING_FILTER is missing from the explain plan.
On 5.0 and 6.0, orphan documents are filtered out and the count is correct, but the SHARDING_FILTER stage is not reported in the explain plan. This suggests the stage is actually executed and the explain output itself is what is incorrect.
Repro script (based on shard_filtering.js):
```
// Repro for: count({predicate}) with secondary reads and orphan documents.
// Based on jstests/sharding/shard_filtering.js.
//
// NOTE(review): the issue-tracker export stripped every inline `{...}` object
// literal from this script (JIRA wiki-markup macro syntax). The missing
// literals below have been reconstructed from the upstream shard_filtering.js
// test and from internal evidence (the shard key pattern, the surviving
// `z: "z"` fields, and the expected count of 10). Reconstructed spans are
// marked "reconstructed" — verify against the original attachment if one
// exists.
(function() {
"use strict";

load("jstests/libs/analyze_plan.js");

// Deliberately inserts orphans outside of migration.
TestData.skipCheckOrphans = true;

const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
const collName = "test.shardfilter";
const mongosDb = st.s.getDB("test");
const mongosColl = st.s.getCollection(collName);

assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
st.ensurePrimaryShard("test", st.shard1.name);
assert.commandWorked(
    st.s.adminCommand({shardCollection: collName, key: {a: 1, "b.c": 1, "d.e.f": 1}}));

// Put a chunk with no data onto shard0 in order to make sure that both shards get targeted.
assert.commandWorked(st.s.adminCommand({split: collName, middle: {a: 20, "b.c": 0, "d.e.f": 0}}));
assert.commandWorked(st.s.adminCommand({split: collName, middle: {a: 30, "b.c": 0, "d.e.f": 0}}));
assert.commandWorked(st.s.adminCommand(
    // reconstructed: {a: 25, ...} targets the empty [20, 30) chunk.
    {moveChunk: collName, find: {a: 25, "b.c": 0, "d.e.f": 0}, to: st.shard0.shardName}));

// Shard the collection and insert some docs.
// reconstructed: `b: {c: ...}` sub-documents restored from shard_filtering.js.
const docs = [
    {_id: 0, a: 1, b: {c: 1}, d: {e: {f: 1}}, g: 100, z: "z"},
    {_id: 1, a: 1, b: {c: 2}, d: {e: {f: 2}}, g: 100.9, z: "z"},
    {_id: 2, a: 1, b: {c: 3}, d: {e: {f: 3}}, g: "a", z: "z"},
    {_id: 3, a: 1, b: {c: 3}, d: {e: {f: 3}}, g: [1, 2, 3], z: "z"},
    {_id: 4, a: "a", b: {c: "b"}, d: {e: {f: "c"}}, g: null, z: "z"},
    {_id: 5, a: 1.0, b: {c: NumberLong(3)}, d: {e: {f: Infinity}}, g: NaN, z: "z"},
];
assert.commandWorked(mongosColl.insert(docs));
assert.eq(mongosColl.find().itcount(), 6);

// Insert some documents with valid partial shard keys to both shards. The versions of these
// documents on shard0 are orphans, since all of the data is owned by shard1.
const docsWithMissingAndNullKeys = [
    {_id: 6, a: "missingParts", z: "z"},
    {_id: 7, a: null, b: {c: 1}, d: {e: {f: 1}}, z: "z"},
    {_id: 8, a: "null", b: {c: null}, d: {e: {f: 1}}, z: "z"},
    {_id: 9, a: "deepNull", b: {c: 1}, d: {e: {f: null}}, z: "z"},
];
assert.commandWorked(st.shard0.getCollection(collName).insert(docsWithMissingAndNullKeys));
assert.commandWorked(st.shard1.getCollection(collName).insert(docsWithMissingAndNullKeys));

// Insert orphan docs without missing or null shard keys onto shard0 and test that they get filtered
// out.
const orphanDocs = [
    {_id: 10, a: 100, b: {c: 100}, d: {e: {f: 999}}, g: "a", z: "z"},
    {_id: 11, a: 101, b: {c: 101}, d: {e: {f: 1000}}, g: "b", z: "z"}
];
assert.commandWorked(st.shard0.getCollection(collName).insert(orphanDocs));
assert.eq(mongosColl.find().itcount(), 10);

// reconstructed: the predicate must be {z: "z"} — it matches every one of the
// 10 owned documents, so only shard filtering (not the predicate) can exclude
// the orphans, and the expected count of 10 holds.
// With primary read pref, count with predicate filters out orphans.
assert.eq(mongosColl.count({z: "z"}), 10);

// The explain plan includes a sharding filter.
jsTestLog(mongosColl.explain().count({z: "z"}));

mongosDb.shardfilter.getMongo().setReadPref("secondary");

// With secondary read pref, count with predicate still filters out orphans.
assert.eq(mongosColl.count({z: "z"}), 10);

// The following explain doesn't include a sharding filter.
jsTestLog(mongosColl.explain().count({z: "z"}));

st.stop();
})();
```
- duplicates
-
SERVER-70810 SHARDING_FILTER stage missing on shards from cluster count command explain with query predicate
- Backlog