diff --git a/buildscripts/gdb/mongo_printers.py b/buildscripts/gdb/mongo_printers.py index bfb4f670def..76ac681c1aa 100644 --- a/buildscripts/gdb/mongo_printers.py +++ b/buildscripts/gdb/mongo_printers.py @@ -774,11 +774,7 @@ class SbeCodeFragmentPrinter(object): value_size = gdb.lookup_type('mongo::sbe::value::Value').sizeof uint8_size = gdb.lookup_type('uint8_t').sizeof uint32_size = gdb.lookup_type('uint32_t').sizeof - uint64_size = gdb.lookup_type('uint64_t').sizeof builtin_size = gdb.lookup_type('mongo::sbe::vm::Builtin').sizeof - time_unit_size = gdb.lookup_type('mongo::TimeUnit').sizeof - timezone_size = gdb.lookup_type('mongo::TimeZone').sizeof - day_of_week_size = gdb.lookup_type('mongo::DayOfWeek').sizeof cur_op = self.pdata end_op = self.pdata + self.size @@ -806,7 +802,7 @@ class SbeCodeFragmentPrinter(object): offset = read_as_integer(cur_op, int_size) cur_op += int_size args = 'offset: ' + str(offset) + ', target: ' + hex(cur_op + offset) - elif op_name in ['pushConstVal', 'getFieldImm']: + elif op_name in ['pushConstVal', 'getFieldConst']: tag = read_as_integer(cur_op, tag_size) args = 'tag: ' + self.valuetags_lookup.get(tag, "unknown") + \ ', value: ' + hex(read_as_integer(cur_op + tag_size, value_size)) @@ -818,7 +814,7 @@ class SbeCodeFragmentPrinter(object): args = 'convert to: ' + \ self.valuetags_lookup.get(read_as_integer(cur_op, tag_size), "unknown") cur_op += tag_size - elif op_name in ['typeMatchImm']: + elif op_name in ['typeMatch']: args = 'mask: ' + hex(read_as_integer(cur_op, uint32_size)) cur_op += uint32_size elif op_name in ['function', 'functionSmall']: @@ -830,10 +826,10 @@ class SbeCodeFragmentPrinter(object): args = 'builtin: ' + self.builtins_lookup.get(builtin_id, "unknown") args += ' arity: ' + str(read_as_integer(cur_op + builtin_size, arity_size)) cur_op += (builtin_size + arity_size) - elif op_name in ['fillEmptyImm']: + elif op_name in ['fillEmptyConst']: args = 'Instruction::Constants: ' + str(read_as_integer(cur_op, 
uint8_size)) cur_op += uint8_size - elif op_name in ['traverseFImm', 'traversePImm']: + elif op_name in ['traverseFConst', 'traversePConst']: const_enum = read_as_integer(cur_op, uint8_size) cur_op += uint8_size args = \ @@ -842,19 +838,6 @@ class SbeCodeFragmentPrinter(object): elif op_name in ['applyClassicMatcher']: args = 'MatchExpression* ' + hex(read_as_integer(cur_op, ptr_size)) cur_op += ptr_size - elif op_name in ['dateTruncImm']: - unit = read_as_integer(cur_op, time_unit_size) - cur_op += time_unit_size - args = 'unit: ' + str(unit) - bin_size = read_as_integer(cur_op, uint64_size) - cur_op += uint64_size - args += ', binSize: ' + str(bin_size) - timezone = read_as_integer(cur_op, timezone_size) - cur_op += timezone_size - args += ', timezone: ' + hex(timezone) - day_of_week = read_as_integer(cur_op, day_of_week_size) - cur_op += day_of_week_size - args += ', dayOfWeek: ' + str(day_of_week) yield hex(op_addr), '{} ({})'.format(op_name, args) diff --git a/buildscripts/resmokeconfig/suites/cqf_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_passthrough.yml index 8e00532152e..87cff833560 100644 --- a/buildscripts/resmokeconfig/suites/cqf_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/cqf_passthrough.yml @@ -14,7 +14,7 @@ selector: # TODO SERVER-23229 on classic which is fixed by SERVER-67548 in CQF. - jstests/aggregation/bugs/groupMissing.js - jstests/core/null_query_semantics.js - # TODO SERVER-70142 Populate planSummary field which is shown in output of $currentOp. + # TODO SERVER-62407 translate to ABT directly from find. 
- jstests/core/currentop_cursors.js # TODO SERVER-67517 - jstests/core/field_name_empty.js diff --git a/etc/evergreen.yml b/etc/evergreen.yml index 339a43d6406..782667be1ad 100644 --- a/etc/evergreen.yml +++ b/etc/evergreen.yml @@ -1569,100 +1569,6 @@ buildvariants: --use-glibcxx-debug --dbg=on --allocator=system - exec_timeout_secs: 32400 # 9 hour timeout - timeout_secs: 18000 # 5 hour idle timeout - tasks: - - name: compile_test_and_package_parallel_core_stream_TG - distros: - - rhel80-xlarge - - name: compile_test_and_package_parallel_unittest_stream_TG - distros: - - rhel80-xlarge - - name: compile_test_and_package_parallel_dbtest_stream_TG - distros: - - rhel80-xlarge - - name: test_api_version_compatibility - - name: .aggfuzzer !.feature_flag_guarded !.no_debug_mode - - name: .aggregation !.feature_flag_guarded !.no_debug_mode - - name: audit - - name: .auth !.no_debug_mode - - name: .causally_consistent !.sharding - - name: .change_streams !.no_debug_mode - - name: .change_stream_fuzzer - - name: .misc_js !.no_debug_mode - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.no_debug_mode - - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.no_debug_mode - distros: - - rhel80-medium - - name: config_fuzzer_concurrency - - name: config_fuzzer_jsCore - - name: config_fuzzer_replica_sets_jscore_passthrough - distros: - - rhel80-large - - name: disk_wiredtiger - - name: .encrypt !.no_debug_mode - - name: idl_tests - - name: initial_sync_fuzzer_gen - - name: .integration - distros: - - rhel80-medium - - name: jsCore - distros: - - rhel80-xlarge - - name: .jscore .common !jsCore - - name: jsCore_minimum_batch_size - - name: jsCore_txns_large_txns_format - - name: json_schema - # TODO(SERVER-69996) reenable after ticket is complete - # - name: .jstestfuzz !.flow_control - - name: libunwind_tests - - name: .multiversion_sanity_check - - name: mqlrun - - name: .multi_shard !.no_debug_mode - - name: multiversion_gen - - name: .ocsp - - name: 
.query_fuzzer - - name: .read_write_concern .large - distros: - - rhel80-medium - - name: .read_write_concern !.large - # TODO(SERVER-69951) reenable after ticket is complete - # - name: .replica_sets !.encrypt !.auth - # distros: - # - rhel80-xlarge - - name: replica_sets_api_version_jscore_passthrough_gen - - name: replica_sets_reconfig_jscore_passthrough_gen - - name: replica_sets_reconfig_kill_primary_jscore_passthrough_gen - distros: - - rhel80-xlarge - - name: retryable_writes_jscore_passthrough_gen - - name: .read_only - - name: .rollbackfuzzer - - name: sasl - - name: search - - name: search_auth - - name: search_ssl - - name: session_jscore_passthrough - - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.no_debug_mode - - name: sharding_api_version_jscore_passthrough_gen - - name: .stitch - - name: .crypt - distros: - - rhel80-xlarge - - name: crypt_build_debug_and_test - distros: - - rhel80-xlarge - - name: .updatefuzzer !.no_debug_mode - - name: secondary_reads_passthrough_gen - - name: server_discovery_and_monitoring_json_test_TG - # TODO(SERVER-70015) reenable after ticket is complete - # - name: .serverless - # distros: - # - rhel80-xlarge - - name: server_selection_json_test_TG - distros: - - rhel80-xlarge - - name: generate_buildid_to_debug_symbols_mapping - &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required @@ -3712,22 +3618,6 @@ buildvariants: # distros: # - rhel80-xlarge -- <<: *enterprise-rhel-80-64-bit-dynamic-required-template - name: enterprise-rhel-80-64-bit-dynamic-required-security-patch-only-unittests - display_name: "~ Shared Library Enterprise RHEL 8.0 Security Patch Only (Unittests)" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history. 
- expansions: - <<: *enterprise-rhel-80-64-bit-dynamic-required-expansions - target_resmoke_time: 15 - max_sub_suites: 15 - tasks: - - name: compile_test_and_package_parallel_unittest_stream_TG - distros: - - rhel80-xlarge - - name: compile_test_and_package_parallel_dbtest_stream_TG - distros: - - rhel80-xlarge - - name: enterprise-ubuntu1804-64-libvoidstar display_name: ~ Enterprise Ubuntu 18.04 w/ libvoidstar modules: diff --git a/etc/evergreen_yml_components/definitions.yml b/etc/evergreen_yml_components/definitions.yml index ce3dea1e243..4f8c2ce8bb4 100644 --- a/etc/evergreen_yml_components/definitions.yml +++ b/etc/evergreen_yml_components/definitions.yml @@ -3672,7 +3672,7 @@ tasks: ## Standalone fuzzer for checking timeseries optimizations correctness ## - <<: *jstestfuzz_template name: aggregation_timeseries_fuzzer_gen - tags: ["aggfuzzer", "common", "timeseries", "require_npm", "random_name", "no_debug_mode"] + tags: ["aggfuzzer", "common", "timeseries", "require_npm", "random_name"] commands: - func: "generate resmoke tasks" vars: @@ -3761,7 +3761,7 @@ tasks: ## jstestfuzz standalone update time-series generational fuzzer ## - <<: *jstestfuzz_template name: update_timeseries_fuzzer_gen - tags: ["updatefuzzer", "require_npm", "random_name", "no_debug_mode"] + tags: ["updatefuzzer", "require_npm", "random_name"] commands: - func: "generate resmoke tasks" vars: @@ -3776,7 +3776,7 @@ tasks: ## jstestfuzz replication update generational fuzzer ## - <<: *jstestfuzz_template name: update_fuzzer_replication_gen - tags: ["updatefuzzer", "require_npm", "random_name", "multiversion", "no_debug_mode"] + tags: ["updatefuzzer", "require_npm", "random_name", "multiversion"] commands: - func: "generate resmoke tasks" vars: @@ -4426,7 +4426,7 @@ tasks: - <<: *task_template name: aggregation_read_concern_majority_passthrough - tags: ["aggregation", "read_write_concern", "no_debug_mode"] + tags: ["aggregation", "read_write_concern"] depends_on: - name: aggregation commands: 
@@ -4573,7 +4573,7 @@ tasks: - <<: *task_template name: change_streams_mongos_passthrough - tags: ["change_streams", "no_debug_mode"] + tags: ["change_streams"] depends_on: - name: change_streams commands: @@ -4642,7 +4642,7 @@ tasks: - <<: *task_template name: change_streams_whole_db_mongos_passthrough - tags: ["change_streams", "no_debug_mode"] + tags: ["change_streams"] depends_on: - name: change_streams_mongos_passthrough commands: @@ -4678,7 +4678,7 @@ tasks: - <<: *task_template name: change_streams_whole_cluster_mongos_passthrough - tags: ["change_streams", "no_debug_mode"] + tags: ["change_streams"] depends_on: - name: change_streams_mongos_passthrough commands: @@ -5288,7 +5288,7 @@ tasks: - <<: *gen_task_template name: noPassthrough_gen - tags: ["misc_js", "no_debug_mode"] + tags: ["misc_js"] commands: - func: "generate resmoke tasks" vars: @@ -5411,7 +5411,7 @@ tasks: - <<: *gen_task_template name: multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough_gen - tags: ["multi_shard", "no_debug_mode"] + tags: ["multi_shard"] commands: - func: "generate resmoke tasks" vars: @@ -5635,7 +5635,7 @@ tasks: - <<: *gen_task_template name: concurrency_replication_multi_stmt_txn_gen - tags: ["concurrency", "common", "repl", "txn", "no_debug_mode"] + tags: ["concurrency", "common", "repl", "txn"] commands: - func: "generate resmoke tasks" vars: @@ -5687,7 +5687,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_replication_gen - tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "common", "read_concern_maj", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5704,7 +5704,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_replication_with_balancer_gen - tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "common", "read_concern_maj", "large", "sharded"] commands: - func: "generate resmoke tasks" 
vars: @@ -5735,7 +5735,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_clusterwide_ops_add_remove_shards_gen - tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "common", "read_concern_maj", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5744,7 +5744,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_causal_consistency_gen - tags: ["concurrency", "non_live_record", "sharded", "no_debug_mode"] + tags: ["concurrency", "non_live_record", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5753,7 +5753,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_causal_consistency_and_balancer_gen - tags: ["concurrency", "large", "non_live_record", "sharded", "no_debug_mode"] + tags: ["concurrency", "large", "non_live_record", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5762,7 +5762,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_with_stepdowns_gen - tags: ["concurrency", "stepdowns", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "stepdowns", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5771,7 +5771,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_with_stepdowns_and_balancer_gen - tags: ["concurrency", "stepdowns", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "stepdowns", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5780,7 +5780,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_terminate_primary_with_balancer_gen - tags: ["concurrency", "stepdowns", "kill_terminate", "sharded", "no_debug_mode"] + tags: ["concurrency", "stepdowns", "kill_terminate", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5789,7 +5789,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_kill_primary_with_balancer_gen - tags: ["concurrency", "stepdowns", "kill_terminate", "sharded", 
"no_debug_mode"] + tags: ["concurrency", "stepdowns", "kill_terminate", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5798,7 +5798,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_multi_stmt_txn_gen - tags: ["concurrency", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5807,7 +5807,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_multi_stmt_txn_with_balancer_gen - tags: ["concurrency", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5816,7 +5816,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_local_read_write_multi_stmt_txn_gen - tags: ["concurrency", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5825,7 +5825,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer_gen - tags: ["concurrency", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5834,7 +5834,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_multi_stmt_txn_with_stepdowns_gen - tags: ["concurrency", "stepdowns", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "stepdowns", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5843,7 +5843,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_multi_stmt_txn_terminate_primary_gen - tags: ["concurrency", "stepdowns", "kill_terminate", "sharded", "no_debug_mode"] + tags: ["concurrency", "stepdowns", "kill_terminate", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5852,7 +5852,7 @@ tasks: - <<: *gen_task_template name: concurrency_sharded_multi_stmt_txn_kill_primary_gen - tags: ["concurrency", "stepdowns", "kill_terminate", "sharded", 
"no_debug_mode"] + tags: ["concurrency", "stepdowns", "kill_terminate", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -5955,7 +5955,7 @@ tasks: - <<: *gen_task_template name: replica_sets_ese_gen - tags: ["replica_sets", "encrypt", "san", "no_debug_mode"] + tags: ["replica_sets", "encrypt", "san"] commands: - func: "generate resmoke tasks" vars: @@ -5963,7 +5963,7 @@ tasks: - <<: *gen_task_template name: replica_sets_ese_gcm_gen - tags: ["replica_sets", "encrypt", "san", "gcm", "no_debug_mode"] + tags: ["replica_sets", "encrypt", "san", "gcm"] commands: - func: "generate resmoke tasks" vars: @@ -5971,7 +5971,7 @@ tasks: - <<: *gen_task_template name: replica_sets_auth_gen - tags: ["replica_sets", "common", "san", "auth", "no_debug_mode"] + tags: ["replica_sets", "common", "san", "auth"] commands: - func: "generate resmoke tasks" vars: @@ -6047,7 +6047,7 @@ tasks: - <<: *gen_task_template name: sharding_ese_gen - tags: ["sharding", "encrypt", "no_debug_mode"] + tags: ["sharding", "encrypt"] commands: - func: "generate resmoke tasks" vars: @@ -6055,7 +6055,7 @@ tasks: - <<: *gen_task_template name: sharding_ese_gcm_gen - tags: ["sharding", "encrypt", "gcm", "no_debug_mode"] + tags: ["sharding", "encrypt", "gcm"] commands: - func: "generate resmoke tasks" vars: @@ -6063,7 +6063,7 @@ tasks: - <<: *gen_task_template name: sharding_auth_gen - tags: ["sharding", "auth", "no_debug_mode"] + tags: ["sharding", "auth"] commands: - func: "generate resmoke tasks" vars: @@ -6071,7 +6071,7 @@ tasks: - <<: *gen_task_template name: sharding_auth_audit_gen - tags: ["auth", "audit", "non_live_record", "no_debug_mode"] + tags: ["auth", "audit", "non_live_record"] commands: - func: "generate resmoke tasks" vars: @@ -6079,7 +6079,7 @@ tasks: - <<: *gen_task_template name: sharding_hello_failures_gen - tags: ["concurrency", "large", "sharded", "no_debug_mode"] + tags: ["concurrency", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: @@ -6394,7 +6394,7 
@@ tasks: - <<: *task_template name: ocsp - tags: ["ssl", "encrypt", "ocsp", "patch_build", "no_debug_mode"] + tags: ["ssl", "encrypt", "ocsp", "patch_build"] commands: - func: "do setup" - func: "run tests" diff --git a/etc/generate_subtasks_config.yml b/etc/generate_subtasks_config.yml index 06d47f1b87e..4b78e2b086b 100644 --- a/etc/generate_subtasks_config.yml +++ b/etc/generate_subtasks_config.yml @@ -39,14 +39,6 @@ build_variant_large_distro_exceptions: - macos-debug-suggested - rhel70 - rhel80 - - rhel80-debug-asan - - rhel80-debug-asan-all-feature-flags - - rhel80-debug-asan-classic-engine - - rhel80-debug-aubsan-lite-required - - rhel80-debug-suggested - - rhel80-debug-ubsan - - rhel80-debug-ubsan-all-feature-flags - - rhel80-debug-ubsan-classic-engine - rhel-82-arm64 - rhel90 - suse12 @@ -54,3 +46,11 @@ build_variant_large_distro_exceptions: - ubi8 - ubuntu1604-container - ubuntu1804-container + - ubuntu1604-debug + - ubuntu1804-debug-asan + - ubuntu1804-debug-asan-all-feature-flags + - ubuntu1804-debug-asan-classic-engine + - ubuntu1804-debug-aubsan-lite-required + - ubuntu1804-debug-ubsan + - ubuntu1804-debug-ubsan-all-feature-flags + - ubuntu1804-debug-ubsan-classic-engine diff --git a/etc/perf.yml b/etc/perf.yml index aea6c6c90fd..cda93a6e7a3 100644 --- a/etc/perf.yml +++ b/etc/perf.yml @@ -868,16 +868,14 @@ buildvariants: - <<: *linux-wt-standalone name: linux-wt-standalone-classic-query-engine display_name: Standalone Linux inMemory (Classic Query Engine) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday expansions: mongodb_setup: microbenchmarks_standalone-classic-query-engine - <<: *linux-wt-standalone name: linux-wt-standalone-sbe display_name: Standalone Linux inMemory (SBE) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). 
- cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * *" # Run it every day for now to get a stable baseline and then change it back to 00:00 on Thursday expansions: mongodb_setup: microbenchmarks_standalone-sbe diff --git a/etc/system_perf.yml b/etc/system_perf.yml index 30eb681fa96..bcfae7884bc 100755 --- a/etc/system_perf.yml +++ b/etc/system_perf.yml @@ -1225,8 +1225,7 @@ buildvariants: - name: linux-standalone-classic-query-engine display_name: Linux Standalone (Classic Query Engine) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup: standalone-classic-query-engine @@ -1260,8 +1259,7 @@ buildvariants: - name: linux-standalone-sbe display_name: Linux Standalone (SBE) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * *" # Run it every day for now to get a stable baseline and then change it back to 00:00 on Thursday modules: *modules expansions: mongodb_setup: standalone-sbe @@ -1277,8 +1275,7 @@ buildvariants: - name: linux-1-node-replSet-classic-query-engine display_name: Linux 1-Node ReplSet (Classic Query Engine) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. 
+ cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup: single-replica-classic-query-engine @@ -1295,12 +1292,10 @@ buildvariants: - name: schedule_variant_auto_tasks - name: linkbench - name: linkbench2 - - name: snapshot_reads - name: linux-1-node-replSet-sbe display_name: Linux 1-Node ReplSet (SBE) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * *" # Run it every day for now to get a stable baseline and then change it back to 00:00 on Thursday modules: *modules expansions: mongodb_setup: single-replica-sbe diff --git a/evergreen/functions/venv_setup.sh b/evergreen/functions/venv_setup.sh index 9b53989aa0e..3d935d87594 100644 --- a/evergreen/functions/venv_setup.sh +++ b/evergreen/functions/venv_setup.sh @@ -74,9 +74,10 @@ activate_venv echo "Upgrading pip to 21.0.1" python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" || exit 1 -if ! python -m pip --disable-pip-version-check install -r "$toolchain_txt" -q --log install.log; then +python -m pip --disable-pip-version-check install -r "$toolchain_txt" -q --log install.log +if [ $? 
!= 0 ]; then echo "Pip install error" - cat install.log || true + cat install.log exit 1 fi python -m pip freeze > pip-requirements.txt diff --git a/jstests/aggregation/expressions/subtract.js b/jstests/aggregation/expressions/subtract.js deleted file mode 100644 index 91e46c08b72..00000000000 --- a/jstests/aggregation/expressions/subtract.js +++ /dev/null @@ -1,34 +0,0 @@ -(function() { -"use strict"; - -const coll = db.add_coll; -coll.drop(); - -assert.commandWorked(coll.insert({_id: 0, lhs: 1, rhs: 1})); -assert.commandWorked(coll.insert({_id: 1, lhs: -2000000000, rhs: 2000000000})); -assert.commandWorked( - coll.insert({_id: 2, lhs: NumberLong(-20000000000), rhs: NumberLong(20000000000)})); -assert.commandWorked(coll.insert({_id: 3, lhs: 10.5, rhs: 0.5})); -assert.commandWorked( - coll.insert({_id: 4, lhs: NumberDecimal("10000.12345"), rhs: NumberDecimal("10.1234")})); -assert.commandWorked(coll.insert({_id: 5, lhs: new Date(1912392670000), rhs: 70000})); -assert.commandWorked( - coll.insert({_id: 6, lhs: new Date(1912392670000), rhs: new Date(1912392600000)})); -assert.commandWorked(coll.insert( - {_id: 7, lhs: NumberLong("9000000000000000000"), rhs: NumberLong("-9000000000000000000")})); - -const result = - coll.aggregate([{$project: {diff: {$subtract: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}]) - .toArray(); -assert.eq(result[0].diff, 0); -assert.eq(result[1].diff, NumberLong("-4000000000")); -assert.eq(result[2].diff, NumberLong("-40000000000")); -assert.eq(result[3].diff, 10.0); -assert.eq(result[4].diff, NumberDecimal("9990.00005")); -assert.eq(result[5].diff, new Date(1912392600000)); -assert.eq(result[6].diff, 70000); - -// TODO WRITING-10039 After type promotion algorithm is fixed, we need to use more strict assert -// to check type promotion -assert.eq(bsonWoCompare(result[7].diff, 1.8e+19), 0); -}()); diff --git a/jstests/aggregation/sources/setWindowFields/spill_to_disk.js b/jstests/aggregation/sources/setWindowFields/spill_to_disk.js index 
041c6e195e9..40eeb984f45 100644 --- a/jstests/aggregation/sources/setWindowFields/spill_to_disk.js +++ b/jstests/aggregation/sources/setWindowFields/spill_to_disk.js @@ -29,7 +29,7 @@ let smallPartitionSize = 6; let largePartitionSize = 21; setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - avgDocSize * smallPartitionSize + 73); + avgDocSize * smallPartitionSize + 50); seedWithTickerData(coll, 10); diff --git a/jstests/auth/multitenancy_test_authzn.js b/jstests/auth/multitenancy_test_authzn.js index 3a5dc7c4f01..543a440a8b6 100644 --- a/jstests/auth/multitenancy_test_authzn.js +++ b/jstests/auth/multitenancy_test_authzn.js @@ -30,7 +30,8 @@ function runTests(conn, tenant, multitenancySupport) { } } - const admin = conn.getDB('admin'); + // TODO (SERVER-67423) Use $tenant with {find:...} operation + const tenantAdmin = conn.getDB((tenant ? tenant.str + '_' : '') + 'admin'); const test = conn.getDB('test'); const cmdSuffix = (tenant === null) ? 
{} : {"$tenant": tenant}; function runCmd(cmd) { @@ -38,31 +39,6 @@ function runTests(conn, tenant, multitenancySupport) { return test.runCommand(cmdToRun); } - function runFindCmdWithTenant(db, collection, filter) { - let result = []; - let cmdRes = assert.commandWorked( - db.runCommand({find: collection, filter: filter, batchSize: 0, "$tenant": tenant})); - result = result.concat(cmdRes.cursor.firstBatch); - - let cursorId = cmdRes.cursor.id; - while (cursorId != 0) { - cmdRes = assert.commandWorked(db.runCommand( - {getMore: cursorId, collection: collection, batchSize: 1, "$tenant": tenant})); - result = result.concat(cmdRes.cursor.nextBatch); - cursorId = cmdRes.cursor.id; - } - - return result; - } - - function findTenantRoles(filter) { - return runFindCmdWithTenant(admin, "system.roles", filter); - } - - function findTenantUsers(filter) { - return runFindCmdWithTenant(admin, "system.users", filter); - } - function validateCounts(expectUsers, expectRoles) { const filter = {db: 'test'}; const admin = conn.getDB('admin'); @@ -79,8 +55,8 @@ function runTests(conn, tenant, multitenancySupport) { if (tenant) { // Look for users/roles in tenant specific collections directly. 
- const tenantUsers = findTenantUsers(filter); - const tenantRoles = findTenantRoles(filter); + const tenantUsers = tenantAdmin.system.users.find(filter).toArray(); + const tenantRoles = tenantAdmin.system.roles.find(filter).toArray(); assert.eq(tenantUsers.length, expectUsers, tojson(tenantUsers)); assert.eq(tenantRoles.length, expectRoles, tojson(tenantRoles)); @@ -114,13 +90,13 @@ function runTests(conn, tenant, multitenancySupport) { validateCounts(3, 4); if (tenant && expectSuccess) { - const myCollUser = findTenantUsers({_id: 'test.myCollUser'})[0]; + const myCollUser = tenantAdmin.system.users.find({_id: 'test.myCollUser'}).toArray()[0]; assert.eq(tojson(myCollUser.roles), tojson(myCollUser_roles), tojson(myCollUser)); - const rwMyColl = findTenantRoles({_id: 'test.rwMyColl'})[0]; + const rwMyColl = tenantAdmin.system.roles.find({_id: 'test.rwMyColl'}).toArray()[0]; assert.eq(tojson(rwMyColl.privileges), tojson(rwMyColl_privs), tojson(rwMyColl)); - const role2 = findTenantRoles({_id: 'test.role2'})[0]; + const role2 = tenantAdmin.system.roles.find({_id: 'test.role2'}).toArray()[0]; assert.eq(tojson(role2.roles), tojson([{role: 'role1', db: 'test'}]), tojson(role2)); - const role3 = findTenantRoles({_id: 'test.role3'})[0]; + const role3 = tenantAdmin.system.roles.find({_id: 'test.role3'}).toArray()[0]; assert.eq(tojson(role3.roles), tojson([{role: 'role1', db: 'test'}]), tojson(role3)); } @@ -139,7 +115,7 @@ function runTests(conn, tenant, multitenancySupport) { {resource: {db: 'test', collection: 'myColl'}, actions: ['insert', 'remove', 'update']}, {resource: {db: 'test', collection: 'otherColl'}, actions: ['find']} ]; - const rwMyColl = findTenantRoles({_id: 'test.rwMyColl'})[0]; + const rwMyColl = tenantAdmin.system.roles.find({_id: 'test.rwMyColl'}).toArray()[0]; assert.eq(tojson(rwMyColl.privileges), tojson(rwMyColl_expectPrivs), tojson(rwMyColl)); } @@ -151,14 +127,14 @@ function runTests(conn, tenant, multitenancySupport) { validateCounts(3, 4); if 
(tenant && expectSuccess) { - const user1 = findTenantUsers({_id: 'test.user1'})[0]; + const user1 = tenantAdmin.system.users.find({_id: 'test.user1'}).toArray()[0]; assert.eq(tojson(user1.roles), tojson([{role: 'role1', db: 'test'}]), tojson(user1)); - const user2 = findTenantUsers({_id: 'test.user2'})[0]; + const user2 = tenantAdmin.system.users.find({_id: 'test.user2'}).toArray()[0]; assert.eq(tojson(user2.roles), tojson([{role: 'role3', db: 'test'}]), tojson(user2)); - const role1 = findTenantRoles({_id: 'test.role1'})[0]; + const role1 = tenantAdmin.system.roles.find({_id: 'test.role1'}).toArray()[0]; assert.eq(tojson(role1.roles), tojson([{role: 'rwMyColl', db: 'test'}]), tojson(role1)); - const role3 = findTenantRoles({_id: 'test.role3'})[0]; + const role3 = tenantAdmin.system.roles.find({_id: 'test.role3'}).toArray()[0]; assert.eq(tojson(role3.roles), tojson([]), tojson(role3)); } @@ -168,9 +144,9 @@ function runTests(conn, tenant, multitenancySupport) { validateCounts(3, 4); if (tenant && expectSuccess) { - const user1 = findTenantUsers({_id: 'test.user1'})[0]; + const user1 = tenantAdmin.system.users.find({_id: 'test.user1'}).toArray()[0]; assert.eq(tojson(user1.roles), tojson([{role: 'role2', db: 'test'}]), tojson(user1)); - const role2 = findTenantRoles({_id: 'test.role2'})[0]; + const role2 = tenantAdmin.system.roles.find({_id: 'test.role2'}).toArray()[0]; assert.eq(tojson(role2.roles), tojson([{role: 'rwMyColl', db: 'test'}]), tojson(role2)); } @@ -181,10 +157,10 @@ function runTests(conn, tenant, multitenancySupport) { if (tenant && expectSuccess) { // role2 should have been revoked from user1 during drop,. 
- const user1 = findTenantUsers({_id: 'test.user1'})[0]; + const user1 = tenantAdmin.system.users.find({_id: 'test.user1'}).toArray()[0]; assert.eq(tojson(user1.roles), tojson([]), tojson(user1)); - assert.eq(0, findTenantUsers({_id: 'test.myCollUser'}).length); - assert.eq(0, findTenantRoles({_id: 'test.role2'}).length); + assert.eq(0, tenantAdmin.system.users.find({_id: 'test.myCollUser'}).toArray().length); + assert.eq(0, tenantAdmin.system.roles.find({_id: 'test.role2'}).toArray().length); } // Cleanup diff --git a/jstests/auth/security_token.js b/jstests/auth/security_token.js index 21bc8ed4942..a2f59b2f395 100644 --- a/jstests/auth/security_token.js +++ b/jstests/auth/security_token.js @@ -42,6 +42,7 @@ function makeTokenAndExpect(user, db) { function runTest(conn, enabled, rst = undefined) { const admin = conn.getDB('admin'); + const tenantAdmin = conn.getDB(tenantID.str + '_admin'); // Must be authenticated as a user with ActionType::useTenant in order to use $tenant assert.commandWorked(admin.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']})); @@ -50,7 +51,6 @@ function runTest(conn, enabled, rst = undefined) { // Create a tenant-local user. 
const createUserCmd = {createUser: 'user1', "$tenant": tenantID, pwd: 'pwd', roles: ['readWriteAnyDatabase']}; - const countUserCmd = {count: "system.users", query: {user: 'user1'}, "$tenant": tenantID}; if (enabled) { assert.commandWorked(admin.runCommand(createUserCmd)); @@ -59,8 +59,9 @@ function runTest(conn, enabled, rst = undefined) { assert.eq(admin.system.users.count({user: 'user1'}), 0, 'user1 should not exist on global users collection'); - const usersCount = assert.commandWorked(admin.runCommand(countUserCmd)); - assert.eq(usersCount.n, 1, 'user1 should exist on tenant users collection'); + assert.eq(tenantAdmin.system.users.count({user: 'user1'}), + 1, + 'user1 should exist on tenant users collection'); } else { assert.commandFailed(admin.runCommand(createUserCmd)); } diff --git a/jstests/auth/validate_sasl_mechanism.js b/jstests/auth/validate_sasl_mechanism.js index 488fdb3afb4..a819c081ff2 100644 --- a/jstests/auth/validate_sasl_mechanism.js +++ b/jstests/auth/validate_sasl_mechanism.js @@ -12,7 +12,7 @@ function waitFailedToStart(pid, exitCode) { } return res.exitCode == exitCode; - }, `Failed to wait for ${pid} to die with exit code ${exitCode}`, 60 * 1000); + }, `Failed to wait for ${pid} to die with exit code ${exitCode}`, 30 * 1000); } const m = MongoRunner.runMongod({ diff --git a/jstests/concurrency/fsm_workload_helpers/drop_utils.js b/jstests/concurrency/fsm_workload_helpers/drop_utils.js index f0d86a1fb49..9d62df9c4a1 100644 --- a/jstests/concurrency/fsm_workload_helpers/drop_utils.js +++ b/jstests/concurrency/fsm_workload_helpers/drop_utils.js @@ -35,32 +35,23 @@ function dropDatabases(db, pattern) { * Helper for dropping roles or users that were created by a workload * during its execution. 
*/ -function dropUtilRetry(elems, cb, message) { - const kNumRetries = 5; - const kRetryInterval = 5000; - - assert.retry(function() { - elems = elems.filter((elem) => !cb(elem)); - return elems.length === 0; - }, message, kNumRetries, kRetryInterval); -} function dropRoles(db, pattern) { assert(pattern instanceof RegExp, 'expected pattern to be a regular expression'); - const rolesToDrop = db.getRoles().map((ri) => ri.role).filter((r) => pattern.test(r)); - dropUtilRetry( - rolesToDrop, - (role) => db.dropRole(role), - "Failed dropping roles: " + tojson(rolesToDrop) + " from database " + db.getName()); + db.getRoles().forEach(function(roleInfo) { + if (pattern.test(roleInfo.role)) { + assertAlways(db.dropRole(roleInfo.role)); + } + }); } function dropUsers(db, pattern) { assert(pattern instanceof RegExp, 'expected pattern to be a regular expression'); - const usersToDrop = db.getUsers().map((ui) => ui.user).filter((u) => pattern.test(u)); - dropUtilRetry( - usersToDrop, - (user) => db.dropUser(user), - "Failed dropping users: " + tojson(usersToDrop) + " from database " + db.getName()); + db.getUsers().forEach(function(userInfo) { + if (pattern.test(userInfo.user)) { + assertAlways(db.dropUser(userInfo.user)); + } + }); } diff --git a/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js b/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js index 0d1d2bc1763..132b9cda167 100644 --- a/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js +++ b/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js @@ -33,29 +33,6 @@ var $config = extendWorkload($config, function($config, $super) { jsTestLog('setFCV state finished'); }; - // Inherithed methods get overridden to tolerate the interruption of - // internal transactions on the config server during the execution of setFCV - // TODO SERVER-70131: remove the overrides if internal transactions are no longer interrupted. 
- $config.states.enableSharding = function(db, collName) { - try { - $super.states.enableSharding.apply(this, arguments); - } catch (err) { - if (err.code !== ErrorCodes.Interrupted) { - throw err; - } - } - }; - - $config.states.shardCollection = function(db, collName) { - try { - $super.states.shardCollection.apply(this, arguments); - } catch (err) { - if (err.code !== ErrorCodes.Interrupted) { - throw err; - } - } - }; - $config.transitions = { init: {enableSharding: 0.3, dropDatabase: 0.3, shardCollection: 0.3, setFCV: 0.1}, enableSharding: {enableSharding: 0.3, dropDatabase: 0.3, shardCollection: 0.3, setFCV: 0.1}, diff --git a/jstests/core/null_query_semantics.js b/jstests/core/null_query_semantics.js index cf4cc836412..34e605db283 100644 --- a/jstests/core/null_query_semantics.js +++ b/jstests/core/null_query_semantics.js @@ -1,7 +1,4 @@ // Tests the behavior of queries with a {$eq: null} or {$ne: null} predicate. -// @tags: [ -// uses_column_store_index, -// ] // (function() { "use strict"; @@ -9,7 +6,6 @@ load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'. // For areAllCollectionsClustered. load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. function extractAValues(results) { return results.map(function(res) { @@ -705,11 +701,9 @@ const keyPatterns = [ {keyPattern: {"$**": 1}}, {keyPattern: {"a.$**": 1}} ]; - // Include Columnstore Index only if FF is enabled and collection is not clustered. 
-const columnstoreEnabled = checkSBEEnabled( - db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true /* checkAllNodes */); -if (columnstoreEnabled && !ClusteredCollectionUtil.areAllCollectionsClustered(db.getMongo())) { +if (TestData.setParameters.hasOwnProperty("featureFlagColumnstoreIndexes") && + !ClusteredCollectionUtil.areAllCollectionsClustered(db.getMongo())) { keyPatterns.push({keyPattern: {"$**": "columnstore"}}); } diff --git a/jstests/cqf/range_descending.js b/jstests/cqf/range_descending.js index 4c8fe22f9fc..3e0960b4fbb 100644 --- a/jstests/cqf/range_descending.js +++ b/jstests/cqf/range_descending.js @@ -11,7 +11,9 @@ "use strict"; load("jstests/libs/optimizer_utils.js"); + const coll = db.cqf_range_descending; + /* * This is the most basic case: a single range predicate with a descending index. */ @@ -20,10 +22,13 @@ const coll = db.cqf_range_descending; assert.commandWorked(coll.insertOne({a: 1})); const indexKey = {a: -1}; assert.commandWorked(coll.createIndex(indexKey)); + for (let i = 0; i < 100; ++i) { assert.commandWorked(coll.insert({})); } + const query = {a: {$gte: 0, $lte: 2}}; + { const res = coll.find(query).hint(indexKey).toArray(); assert.eq(res.length, 1); @@ -33,36 +38,36 @@ const coll = db.cqf_range_descending; assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); } } + /* - * Test a compound index, with a range on the leading field and a descending index on the - * secondary field. + * Test a compound index, with a range on the leading field and a descending index on the secondary + * field. 
*/ { coll.drop(); - var bulkOp = coll.initializeOrderedBulkOp(); for (let i = 10; i <= 30; i += 10) { for (let j = 1; j <= 3; j++) { - for (let k = 0; k < 10; k++) { - bulkOp.insert({a: i, b: j}); - } + assert.commandWorked(coll.insert({a: i, b: j})); } } - for (let i = 0; i < 1000; ++i) { - bulkOp.insert({}); + for (let i = 0; i < 100; ++i) { + assert.commandWorked(coll.insert({})); } - assert.commandWorked(bulkOp.execute()); const indexKey = {a: 1, b: -1}; assert.commandWorked(coll.createIndex(indexKey)); + const query = {a: {$gte: 10, $lte: 20}, b: {$gt: 1}}; + { const res = coll.find(query).hint(indexKey).toArray(); - assert.eq(res.length, 40); + assert.eq(res.length, 4); } { const res = coll.explain("executionStats").find(query).hint(indexKey).finish(); assertValueOnPlanPath("IndexScan", res, "child.leftChild.child.nodeType"); } } + /* * Test a descending index with range predicates, ensuring that the index plan is chosen. */ @@ -74,7 +79,9 @@ const coll = db.cqf_range_descending; } const indexKey = {a: -1, b: -1}; assert.commandWorked(coll.createIndex(indexKey)); + const query = [{a: 1}, {_id: 0, a: 1, b: 1}]; + { const res = coll.find(...query).hint(indexKey).toArray(); assert.eq(res.length, 1); @@ -84,4 +91,4 @@ const coll = db.cqf_range_descending; assertValueOnPlanPath("IndexScan", res, "child.child.leftChild.nodeType"); } } -}()); +}()); \ No newline at end of file diff --git a/jstests/libs/optimizer_utils.js b/jstests/libs/optimizer_utils.js index 22483624ad9..aad1fb1d966 100644 --- a/jstests/libs/optimizer_utils.js +++ b/jstests/libs/optimizer_utils.js @@ -53,7 +53,7 @@ function leftmostLeafStage(node) { /** * Get a very simplified version of a plan, which only includes nodeType and nesting structure. 
*/ -function getPlanSkeleton(node, recursiveKeepKeys = [], addToKeepKeys = []) { +function getPlanSkeleton(node) { const keepKeys = [ 'nodeType', @@ -64,23 +64,16 @@ function getPlanSkeleton(node, recursiveKeepKeys = [], addToKeepKeys = []) { 'children', 'leftChild', 'rightChild', - ].concat(addToKeepKeys); + ]; if (Array.isArray(node)) { return node.map(n => getPlanSkeleton(n)); } else if (node === null || typeof node !== 'object') { return node; } else { - return Object.fromEntries( - Object.keys(node) - .filter(key => (keepKeys.includes(key) || recursiveKeepKeys.includes(key))) - .map(key => { - if (recursiveKeepKeys.includes(key)) { - return [key, node[key]]; - } else { - return [key, getPlanSkeleton(node[key], recursiveKeepKeys, addToKeepKeys)]; - } - })); + return Object.fromEntries(Object.keys(node) + .filter(key => keepKeys.includes(key)) + .map(key => [key, getPlanSkeleton(node[key])])); } } diff --git a/jstests/libs/sbe_assert_error_override.js b/jstests/libs/sbe_assert_error_override.js index 233358e1743..de9feb9c1b6 100644 --- a/jstests/libs/sbe_assert_error_override.js +++ b/jstests/libs/sbe_assert_error_override.js @@ -117,16 +117,15 @@ const equivalentErrorCodesList = [ [292, 5859000, 5843600, 5843601], [6045000, 5166606], [146, 13548], - [ErrorCodes.TypeMismatch, 5156200, 5156201], - [5439100, 40517, 7003907], - [5439101, 40485, 7007908], + [5439100, 40517], + [5439101, 40485], [5439102, 5439012], - [5439103, 5439013, 7003902], - [5439104, 9, 7003903], - [5439105, 5439017, 7003904, 7003905], - [5439105, 5439018, 7003906], - [5439106, 5439015, 7003909], - [5439107, 5439016, 7003910], + [5439103, 5439013], + [5439104, 9], + [5439105, 5439017], + [5439105, 5439018], + [5439106, 5439015], + [5439107, 5439016], ]; // This map is generated based on the contents of 'equivalentErrorCodesList'. 
This map should _not_ diff --git a/jstests/multiVersion/genericSetFCVUsage/range_deletion_key_pattern_field_added_on_upgrade.js b/jstests/multiVersion/genericSetFCVUsage/range_deletion_key_pattern_field_added_on_upgrade.js new file mode 100644 index 00000000000..3aacea9455b --- /dev/null +++ b/jstests/multiVersion/genericSetFCVUsage/range_deletion_key_pattern_field_added_on_upgrade.js @@ -0,0 +1,91 @@ +/** + * Test that - upon upgrade - the key pattern is added to all range deletion documents missing it + * + * @tags: [ + * multiversion_incompatible, + * featureFlagRangeDeleterService + * ] + */ + +(function() { +"use strict"; + +load("jstests/libs/fail_point_util.js"); + +function assertNumOfDocsWithKeyPattern(expected, rangeDeletionsColl) { + assert.eq(expected, rangeDeletionsColl.find({keyPattern: {$exists: 1}}).itcount()); +} + +function assertKeyPattern(expected, nss, rangeDeletionsColl) { + const uuid = st.getDB("config").collections.findOne({_id: nss}).uuid; + const res = rangeDeletionsColl + .aggregate([{$match: {collectionUuid: uuid}}, {$group: {"_id": "$keyPattern"}}]) + .toArray(); + + assert.eq(res.length, 1, tojson(res)); + assert.eq(expected, res[0]._id); +} + +const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); + +// Setup database +const dbName = 'db'; +const db = st.getDB(dbName); +assert.commandWorked( + st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName})); +let rangeDeletionsShard0Coll = st.shard0.getDB("config").rangeDeletions; + +// Setup test collections +const keyColl1 = { + _id: 1 +}; +const keyColl2 = { + a: 1 +}; +const coll1 = db['test1']; +const coll2 = db['test2']; +const nss1 = coll1.getFullName(); +const nss2 = coll2.getFullName(); +assert.commandWorked(st.s.adminCommand({shardCollection: nss1, key: keyColl1})); +assert.commandWorked(st.s.adminCommand({shardCollection: nss2, key: keyColl2})); + +// Lower the feature compatibility version in order to be able to trigger the upgrade code +// afterwards 
+assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); + +// Create two chunks for every collection +assert.commandWorked(st.s.adminCommand({split: nss1, middle: {_id: 0}})); +assert.commandWorked(st.s.adminCommand({split: nss2, middle: {a: 0}})); + +// Perform some migrations in order to create some range deletion documents on shard0 +let suspendRangeDeletionFailpoint = configureFailPoint(st.shard0, "suspendRangeDeletion"); +assert.commandWorked(db.adminCommand({moveChunk: nss1, find: {_id: 1}, to: st.shard1.shardName})); +assert.commandWorked(db.adminCommand({moveChunk: nss1, find: {_id: -1}, to: st.shard1.shardName})); +assert.commandWorked(db.adminCommand({moveChunk: nss2, find: {a: 1}, to: st.shard1.shardName})); +assert.commandWorked(db.adminCommand({moveChunk: nss2, find: {a: -1}, to: st.shard1.shardName})); + +// The `keyPattern` field is properly set on range deletion documents created by the new binary +assertNumOfDocsWithKeyPattern(4, rangeDeletionsShard0Coll); + +// Unset the `keyPattern` field from all range deletion documents (pretend they were created while +// running an older binary) +assert.commandWorked(st.shard0.getDB("config").runCommand({ + update: "rangeDeletions", + updates: [{q: {}, u: {$unset: {"keyPattern": ""}}, multi: true, upsert: false}] +})); + +assertNumOfDocsWithKeyPattern(0, rangeDeletionsShard0Coll); + +// Upgrade the cluster to the latest FCV +assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + +// Check `keyPattern` field has been implicitly added to every range deletion document +assertNumOfDocsWithKeyPattern(4, rangeDeletionsShard0Coll); +assertKeyPattern(keyColl1, nss1, rangeDeletionsShard0Coll); +assertKeyPattern(keyColl2, nss2, rangeDeletionsShard0Coll); + +// Allow everything to finish +suspendRangeDeletionFailpoint.off(); + +st.stop(); +})(); diff --git a/jstests/noPassthrough/libs/index_build.js b/jstests/noPassthrough/libs/index_build.js index 
6577df94016..4e4775cb64d 100644 --- a/jstests/noPassthrough/libs/index_build.js +++ b/jstests/noPassthrough/libs/index_build.js @@ -358,8 +358,8 @@ const ResumableIndexBuildTest = class { } /** - * Runs createIndexFn in a parellel shell to create indexes, modifying the collection with the - * side writes table. + * Runs createIndexFn in a parallel shell to create indexes, inserting the documents specified + * by sideWrites into the side writes table. * * 'createIndexFn' should take three parameters: collection name, index specifications, and * index names. @@ -368,10 +368,6 @@ const ResumableIndexBuildTest = class { * is [[{a: 1}, {b: 1}], [{c: 1}]], a valid indexNames would look like * [["index_1", "index_2"], ["index_3"]]. * - * 'sideWrites' can be an array specifying documents to be inserted into the side writes table, - * or a function that performs any series of operations (inserts, deletes, or updates) with the - * side writes table - * * If {hangBeforeBuildingIndex: true}, returns with the hangBeforeBuildingIndex failpoint * enabled and the index builds hanging at this point. */ @@ -415,11 +411,7 @@ const ResumableIndexBuildTest = class { }); } - if (Array.isArray(sideWrites)) { - assert.commandWorked(coll.insert(sideWrites)); - } else { - sideWrites(coll); - } + assert.commandWorked(coll.insert(sideWrites)); // Before building the index, wait for the the last op to be committed so that establishing // the majority read cursor does not race with step down. 
diff --git a/jstests/noPassthrough/predictive_connpool.js b/jstests/noPassthrough/predictive_connpool.js index dbe517a5667..be307a6e318 100644 --- a/jstests/noPassthrough/predictive_connpool.js +++ b/jstests/noPassthrough/predictive_connpool.js @@ -105,7 +105,7 @@ function hasConnPoolStats(args) { return hosts.map(host => checkStats(res, host)).every(x => x); } - assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10 * 1000); + assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000); jsTestLog("Check #" + checkNum + " successful"); } diff --git a/jstests/noPassthrough/restart_index_build_if_resume_fails.js b/jstests/noPassthrough/restart_index_build_if_resume_fails.js index 0cee7035e63..e166465d2c6 100644 --- a/jstests/noPassthrough/restart_index_build_if_resume_fails.js +++ b/jstests/noPassthrough/restart_index_build_if_resume_fails.js @@ -55,55 +55,33 @@ ResumableIndexBuildTest.runFailToResume(rst, [{a: 10}, {a: 11}], [{a: 12}, {a: 13}]); +// TODO (SERVER-65978): Add side writes to these test cases once they are supported by column store +// index builds. 
if (columnstoreEnabled) { - ResumableIndexBuildTest.runFailToResume( - rst, - dbName, - collName, - {"$**": "columnstore"}, - {failPointAfterStartup: "failToParseResumeIndexInfo"}, - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 14}]}, {a: 15}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 14})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 15}, {a: 1})); - }), - [{a: 16}, {a: 17}], - true /* failWhileParsing */); + ResumableIndexBuildTest.runFailToResume(rst, + dbName, + collName, + {"$**": "columnstore"}, + {failPointAfterStartup: "failToParseResumeIndexInfo"}, + [], + [{a: 4}, {a: 5}], + true /* failWhileParsing */); - ResumableIndexBuildTest.runFailToResume( - rst, - dbName, - collName, - {"$**": "columnstore"}, - {failPointAfterStartup: "failSetUpResumeIndexBuild"}, - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 18}]}, {a: 19}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 18})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 19}, {a: 1})); - }), - [{a: 20}, {a: 21}]); + ResumableIndexBuildTest.runFailToResume(rst, + dbName, + collName, + {"$**": "columnstore"}, + {failPointAfterStartup: "failSetUpResumeIndexBuild"}, + [], + [{a: 8}, {a: 9}]); - ResumableIndexBuildTest.runFailToResume( - rst, - dbName, - collName, - {"$**": "columnstore"}, - {removeTempFilesBeforeStartup: true}, - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 22}]}, {a: 23}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 22})); - assert.commandWorked(collection.insert({a: 1})); - 
assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 23}, {a: 1})); - }), - [{a: 24}, {a: 25}]); + ResumableIndexBuildTest.runFailToResume(rst, + dbName, + collName, + {"$**": "columnstore"}, + {removeTempFilesBeforeStartup: true}, + [], + [{a: 12}, {a: 13}]); } rst.stopSet(); diff --git a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js index 3a4a84880f6..a33f279e9a4 100644 --- a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js +++ b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js @@ -51,6 +51,8 @@ ResumableIndexBuildTest.runResumeInterruptedByShutdown( [{a: 77}, {a: 88}], [{a: 99}, {a: 100}]); +// TODO (SERVER-65978): Add side writes to these test cases once they are supported by column store +// index builds. if (columnstoreEnabled) { ResumableIndexBuildTest.runResumeInterruptedByShutdown( rst, @@ -61,15 +63,8 @@ if (columnstoreEnabled) { {name: "hangIndexBuildDuringCollectionScanPhaseBeforeInsertion", logIdWithBuildUUID: 20386}, "collection scan", {a: 1}, // initial doc - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 14}]}, {a: 15}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 14})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 2})); - assert.commandWorked(collection.update({a: 15}, {a: 2})); - }), - [{a: 16}, {a: 17}]); + [], + [{a: 4}, {a: 5}]); ResumableIndexBuildTest.runResumeInterruptedByShutdown( rst, @@ -79,15 +74,8 @@ if (columnstoreEnabled) { "resumable_index_build4", // index name {name: "hangIndexBuildDuringBulkLoadPhase", logIdWithIndexName: 4924400}, "bulk load", - {a: [44, 55, 66]}, // initial doc - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 77}]}, {a: 88}])); - 
assert.commandWorked(collection.update({a: [44, 55, 66]}, {a: [55, 66]})); - assert.commandWorked(collection.remove({"a.b": 77})); - assert.commandWorked(collection.insert({a: 99})); - assert.commandWorked(collection.remove({a: [55, 66]})); - assert.commandWorked(collection.update({a: 99}, {a: 1})); - }), + {a: [11, 22, 33]}, // initial doc + [], [{a: 99}, {a: 100}]); } rst.stopSet(); diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js index bf70ffffb1c..088759a3a81 100644 --- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js +++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js @@ -12,7 +12,6 @@ "use strict"; load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. const dbName = "test"; @@ -20,9 +19,6 @@ const rst = new ReplSetTest({nodes: 1}); rst.startSet(); rst.initiate(); -const columnstoreEnabled = checkSBEEnabled( - rst.getPrimary().getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true); - const runTests = function(docs, indexSpecsFlat, sideWrites, collNameSuffix) { const coll = rst.getPrimary().getDB(dbName).getCollection(jsTestName() + collNameSuffix); assert.commandWorked(coll.insert(docs)); @@ -57,18 +53,6 @@ runTests({a: 1}, [{"$**": 1}, {h: 1}], [{a: [1, 2], b: {c: [3, 4]}, d: ""}, {e: "", f: [[]], g: null, h: 8}], "_wildcard"); -if (columnstoreEnabled) { - runTests({a: 1}, - [{"$**": "columnstore"}, {h: 1}], - (function(collection) { - assert.commandWorked(collection.insert([{a: [{c: 2}], b: 2}, {a: 3, b: 3}])); - assert.commandWorked(collection.update({a: 3}, {a: 4, b: 3})); - assert.commandWorked(collection.remove({"a.c": 2})); - assert.commandWorked(collection.insert({a: 4, b: 4})); - assert.commandWorked(collection.remove({b: 3})); - assert.commandWorked(collection.update({a: 4}, {a: 2})); - }), - "_columnstore"); -} + rst.stopSet(); })(); \ 
No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js index 8e6f529c8c4..8bc4c431246 100644 --- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js +++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js @@ -13,7 +13,6 @@ "use strict"; load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. const dbName = "test"; const collName = jsTestName(); @@ -25,9 +24,6 @@ rst.initiate(); const primary = rst.getPrimary(); const coll = primary.getDB(dbName).getCollection(collName); -const columnstoreEnabled = checkSBEEnabled( - primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true); - assert.commandWorked(coll.insert({a: 1})); jsTestLog("Testing when primary shuts down in the middle of the first drain"); @@ -80,83 +76,5 @@ ResumableIndexBuildTest.runOnPrimaryToTestCommitQuorum( [{a: 14}, {a: 15}], [{a: 16}, {a: 17}]); -if (columnstoreEnabled) { - ResumableIndexBuildTest.run( - rst, - dbName, - collName, - [[{"$**": "columnstore"}]], - [{name: "hangIndexBuildDuringDrainWritesPhase", logIdWithIndexName: 4841800}], - 0, - ["drain writes"], - [{skippedPhaseLogID: 20392}], - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 10}]}, {a: 11}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 10})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 11}, {a: 1})); - }), - [{a: 12}, {a: 13}]); - ResumableIndexBuildTest.run( - rst, - dbName, - collName, - [[{"$**": "columnstore"}]], - [{name: "hangIndexBuildDuringDrainWritesPhase", logIdWithIndexName: 4841800}], - 1, - ["drain writes"], - [{skippedPhaseLogID: 20392}], - (function(collection) { - 
assert.commandWorked(collection.insert([{a: [{b: 14}]}, {a: 15}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 14})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 15}, {a: 1})); - }), - [{a: 16}, {a: 17}]); - - jsTestLog("Testing when primary shuts down after voting, but before commit quorum satisfied"); - - ResumableIndexBuildTest.runOnPrimaryToTestCommitQuorum( - rst, - dbName, - collName, - {"$**": "columnstore"}, - "hangIndexBuildAfterSignalPrimaryForCommitReadiness", - "hangAfterIndexBuildFirstDrain", - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 22}]}, {a: 23}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 22})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 23}, {a: 1})); - }), - [{a: 24}, {a: 25}]); - - jsTestLog( - "Testing when primary shuts down after commit quorum satisfied, but before commitIndexBuild oplog entry written"); - - ResumableIndexBuildTest.runOnPrimaryToTestCommitQuorum( - rst, - dbName, - collName, - {"$**": "columnstore"}, - "hangIndexBuildAfterSignalPrimaryForCommitReadiness", - "hangIndexBuildAfterSignalPrimaryForCommitReadiness", - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 30}]}, {a: 31}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 30})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 31}, {a: 1})); - }), - [{a: 32}, {a: 33}]); -} - rst.stopSet(); })(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js 
b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js index 04bc57ad2a8..1163d0664c4 100644 --- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js +++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js @@ -16,7 +16,6 @@ "use strict"; load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. const dbName = "test"; const collName = jsTestName(); @@ -33,9 +32,6 @@ rst.initiate(); let primary = rst.getPrimary(); let coll = primary.getDB(dbName).getCollection(collName); -const columnstoreEnabled = checkSBEEnabled( - primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true); - assert.commandWorked(coll.insert({a: 1})); jsTestLog("Testing when secondary shuts down in the middle of the first drain"); @@ -84,84 +80,5 @@ ResumableIndexBuildTest.runOnSecondary(rst, [{a: 14}, {a: 15}], [{a: 16}, {a: 17}]); -if (columnstoreEnabled) { - jsTestLog("Testing when secondary shuts down in the middle of the first drain"); - ResumableIndexBuildTest.runOnSecondary( - rst, - dbName, - collName, - {"$**": "columnstore"}, - "hangIndexBuildDuringDrainWritesPhase", - 0, - undefined, /* primaryFailPointName */ - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 10}]}, {a: 11}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 10})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 11}, {a: 1})); - }), - [{a: 12}, {a: 13}]); - ResumableIndexBuildTest.runOnSecondary( - rst, - dbName, - collName, - {"$**": "columnstore"}, - "hangIndexBuildDuringDrainWritesPhase", - 1, - undefined, /* primaryFailPointName */ - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 14}]}, {a: 15}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); 
- assert.commandWorked(collection.remove({"a.b": 14})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 15}, {a: 1})); - }), - [{a: 16}, {a: 17}]); - - jsTestLog("Testing when secondary shuts down before voting"); - - ResumableIndexBuildTest.runOnSecondary( - rst, - dbName, - collName, - {"$**": "columnstore"}, - "hangAfterIndexBuildFirstDrain", - {}, - undefined, /* primaryFailPointName */ - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 18}]}, {a: 19}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 18})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 19}, {a: 1})); - }), - [{a: 20}, {a: 21}]); - - jsTestLog( - "Testing when secondary shuts down after commit quorum satisfied, but before replicating commitIndexBuild oplog entry"); - - ResumableIndexBuildTest.runOnSecondary( - rst, - dbName, - collName, - {"$**": "columnstore"}, - "hangIndexBuildAfterSignalPrimaryForCommitReadiness", - {}, - "hangIndexBuildBeforeCommit", - (function(collection) { - assert.commandWorked(collection.insert([{a: [{b: 26}]}, {a: 27}])); - assert.commandWorked(collection.update({a: 1}, {a: 2})); - assert.commandWorked(collection.remove({"a.b": 26})); - assert.commandWorked(collection.insert({a: 1})); - assert.commandWorked(collection.remove({a: 1})); - assert.commandWorked(collection.update({a: 27}, {a: 1})); - }), - [{a: 28}, {a: 29}]); -} - rst.stopSet(); })(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_mixed_phases.js b/jstests/noPassthrough/resumable_index_build_mixed_phases.js index 0e165510e85..b62da7449e6 100644 --- a/jstests/noPassthrough/resumable_index_build_mixed_phases.js +++ b/jstests/noPassthrough/resumable_index_build_mixed_phases.js @@ -61,15 
+61,6 @@ const runTests = function(failPoints, resumePhases, resumeChecks) { resumePhases, resumeChecks, "_wildcard"); - - if (columnstoreEnabled) { - runTest([{a: 1, b: 1}, {a: 2, b: 2}, {a: 3, b: 3}], - [[{"$**": "columnstore"}], [{b: 1}]], - failPoints, - resumePhases, - resumeChecks, - "_columnstore"); - } }; runTests( @@ -114,5 +105,59 @@ runTests( ], ["bulk load", "drain writes"], [{skippedPhaseLogID: 20391}, {skippedPhaseLogID: 20392}]); + +// TODO (SERVER-65978): Add sidewrites to tests and combine columnTests with normal runTests once +// side writes are implemented as the numbers for numScannedAfterResume will match +if (columnstoreEnabled) { + const runColumnTests = function(failPoints, resumePhases, resumeChecks) { + const docs = [{a: 1, b: 1}, {a: 2, b: 2}, {a: 3, b: 3}]; + const coll = rst.getPrimary().getDB(dbName).getCollection( + jsTestName() + "_" + resumePhases[0].replace(" ", "_") + "_" + + resumePhases[1].replace(" ", "_") + "_columnstore"); + assert.commandWorked(coll.insert(docs)); + + ResumableIndexBuildTest.run(rst, + dbName, + coll.getName(), + [[{b: 1}], [{"$**": "columnstore"}]], + failPoints, + 1, + resumePhases, + resumeChecks, + [], + [{a: 7, b: 7}, {a: 8, b: 8}, {a: 9, b: 9}]); + }; + + runColumnTests( + [ + {name: "hangIndexBuildBeforeWaitingUntilMajorityOpTime", logIdWithBuildUUID: 4940901}, + { + name: "hangIndexBuildDuringCollectionScanPhaseBeforeInsertion", + logIdWithBuildUUID: 20386 + } + ], + ["initialized", "collection scan"], + [{numScannedAfterResume: 3}, {numScannedAfterResume: 2}]); + + runColumnTests( + [ + {name: "hangIndexBuildBeforeWaitingUntilMajorityOpTime", logIdWithBuildUUID: 4940901}, + {name: "hangIndexBuildDuringBulkLoadPhase", logIdWithIndexName: 4924400} + + ], + ["initialized", "bulk load"], + [{numScannedAfterResume: 3}, {skippedPhaseLogID: 20391}]); + + runColumnTests( + [ + { + name: "hangIndexBuildDuringCollectionScanPhaseBeforeInsertion", + logIdWithBuildUUID: 20386 + }, + {name: 
"hangIndexBuildDuringBulkLoadPhase", logIdWithIndexName: 4924400} + ], + ["collection scan", "bulk load"], + [{numScannedAfterResume: 2}, {skippedPhaseLogID: 20391}]); +} rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js b/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js index d73cb9c7d7b..d875a0ac71b 100644 --- a/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js +++ b/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js @@ -12,11 +12,14 @@ (function() { "use strict"; -load('jstests/libs/uuid_util.js'); +function uuidToString(uuid) { + const [_, uuidString] = uuid.toString().match(/"((?:\\.|[^"\\])*)"/); + return uuidString; +} function entriesInContainer(primary, uuid) { return primary.getDB("system") - .getCollection("globalIndexes." + extractUUIDFromObject(uuid)) + .getCollection("globalIndexes." + uuidToString(uuid)) .find() .itcount(); } @@ -446,7 +449,7 @@ assert.commandFailedWithCode(adminDB.runCommand({_shardsvrWriteGlobalIndexKeys: // triggering DuplicateKey error on inserting the same index key. assert.eq(1, primary.getDB("system") - .getCollection("globalIndexes." + extractUUIDFromObject(globalIndexUUID)) + .getCollection("globalIndexes." + uuidToString(globalIndexUUID)) .find({_id: {sk0: "first", _id: "first"}}) .itcount()); session.startTransaction(); @@ -459,7 +462,7 @@ assert.commandFailedWithCode(adminDB.runCommand({_shardsvrWriteGlobalIndexKeys: session.abortTransaction(); assert.eq(1, primary.getDB("system") - .getCollection("globalIndexes." + extractUUIDFromObject(otherGlobalIndexUUID)) + .getCollection("globalIndexes." + uuidToString(otherGlobalIndexUUID)) .find({_id: {sk0: "secondOnSecondContainer", _id: "secondOnSecondContainer"}}) .itcount()); session.startTransaction(); @@ -503,12 +506,12 @@ assert.commandFailedWithCode(adminDB.runCommand({_shardsvrWriteGlobalIndexKeys: // triggering DuplicateKey error on inserting the same index key. 
assert.eq(1, primary.getDB("system") - .getCollection("globalIndexes." + extractUUIDFromObject(globalIndexUUID)) + .getCollection("globalIndexes." + uuidToString(globalIndexUUID)) .find({_id: {sk0: "globalIndexKey", _id: "globalIndexKey"}}) .itcount()); assert.eq(1, primary.getDB("system") - .getCollection("globalIndexes." + extractUUIDFromObject(globalIndexUUID)) + .getCollection("globalIndexes." + uuidToString(globalIndexUUID)) .find({_id: {sk0: "globalIndexKey2", _id: "globalIndexKey"}}) .itcount()); session.startTransaction(); diff --git a/jstests/noPassthrough/shardsvr_global_index_ddl.js b/jstests/noPassthrough/shardsvr_global_index_ddl.js index 39a0205ec25..c47df2a1c8c 100644 --- a/jstests/noPassthrough/shardsvr_global_index_ddl.js +++ b/jstests/noPassthrough/shardsvr_global_index_ddl.js @@ -11,7 +11,11 @@ "use strict"; load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection. -load('jstests/libs/uuid_util.js'); + +function uuidToString(uuid) { + const [_, uuidString] = uuid.toString().match(/"((?:\\.|[^"\\])*)"/); + return uuidString; +} function verifyCollectionExists(node, globalIndexUUID, namespace) { const systemDB = node.getDB("system"); @@ -76,11 +80,6 @@ function verifyOplogEntry(node, globalIndexUUID, namespace, commandString, lsid, assert.eq(oplogEntry.op, "c"); assert.eq(oplogEntry.ui, globalIndexUUID); assert.docEq(oplogEntry.o, commandStringNamespaceObj); - if (commandString === "dropGlobalIndex") { - assert.eq(0, oplogEntry.o2.numRecords); - } else { - assert.eq(undefined, oplogEntry.o2); - } // lsid and txnNumber are either both present (retryable writes) or absent. assert((lsid && txnNumber) || (!lsid && !txnNumber)); @@ -97,7 +96,7 @@ function verifyCommandIsRetryableWrite(node, command, oplogCommandString, setup) const lsid = session.getSessionId(); const txnNumber = NumberLong(10); const indexUUID = UUID(); - const globalIndexCollName = "globalIndexes." 
+ extractUUIDFromObject(indexUUID); + const globalIndexCollName = "globalIndexes." + uuidToString(indexUUID); var commandObj = {}; commandObj[command] = indexUUID; @@ -148,7 +147,7 @@ function testCreateGlobalIndex(rst) { const primary = rst.getPrimary(); const adminDB = primary.getDB("admin"); const globalIndexUUID = UUID(); - const globalIndexCollName = "globalIndexes." + extractUUIDFromObject(globalIndexUUID); + const globalIndexCollName = "globalIndexes." + uuidToString(globalIndexUUID); const oplogCommandString = "createGlobalIndex"; verifyMultiDocumentTransactionDisallowed(primary, {_shardsvrCreateGlobalIndex: UUID()}); @@ -174,7 +173,7 @@ function testDropGlobalIndex(rst) { const primary = rst.getPrimary(); const adminDB = primary.getDB("admin"); const globalIndexUUID = UUID(); - const globalIndexCollName = "globalIndexes." + extractUUIDFromObject(globalIndexUUID); + const globalIndexCollName = "globalIndexes." + uuidToString(globalIndexUUID); const oplogCommandString = "dropGlobalIndex"; verifyMultiDocumentTransactionDisallowed(primary, {_shardsvrDropGlobalIndex: UUID()}); diff --git a/jstests/noPassthrough/shell_data_consistency_checker_get_diff.js b/jstests/noPassthrough/shell_data_consistency_checker_get_diff.js deleted file mode 100644 index 7be4de22e86..00000000000 --- a/jstests/noPassthrough/shell_data_consistency_checker_get_diff.js +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Tests DataConsistencyChecker.getDiff() correctly reports mismatched and missing documents. 
- */ -(function() { -"use strict"; - -class ArrayCursor { - constructor(arr) { - this.i = 0; - this.arr = arr; - } - - hasNext() { - return this.i < this.arr.length; - } - - next() { - return this.arr[this.i++]; - } -} - -let diff = DataConsistencyChecker.getDiff( - new ArrayCursor([{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]), - new ArrayCursor([{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}])); -assert.eq(diff, { - docsWithDifferentContents: [], - docsMissingOnFirst: [], - docsMissingOnSecond: [], -}); - -diff = DataConsistencyChecker.getDiff( - new ArrayCursor([{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]), - new ArrayCursor([{_id: 1, y: 1}, {_id: 2, y: 2}, {_id: 3, y: 3}])); -assert.eq(diff, { - docsWithDifferentContents: [ - {first: {_id: 1, x: 1}, second: {_id: 1, y: 1}}, - {first: {_id: 2, x: 2}, second: {_id: 2, y: 2}}, - {first: {_id: 3, x: 3}, second: {_id: 3, y: 3}}, - ], - docsMissingOnFirst: [], - docsMissingOnSecond: [], -}); - -diff = DataConsistencyChecker.getDiff( - new ArrayCursor([{_id: 3, x: 3}]), - new ArrayCursor([{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}])); -assert.eq(diff, { - docsWithDifferentContents: [], - docsMissingOnFirst: [{_id: 1, x: 1}, {_id: 2, x: 2}], - docsMissingOnSecond: [], -}); - -diff = DataConsistencyChecker.getDiff( - new ArrayCursor([{_id: 2, x: 2}, {_id: 4, x: 4}]), - new ArrayCursor([{_id: 1, y: 1}, {_id: 2, y: 2}, {_id: 3, y: 3}])); -assert.eq(diff, { - docsWithDifferentContents: [{first: {_id: 2, x: 2}, second: {_id: 2, y: 2}}], - docsMissingOnFirst: [{_id: 1, y: 1}, {_id: 3, y: 3}], - docsMissingOnSecond: [{_id: 4, x: 4}], -}); -})(); diff --git a/jstests/noPassthrough/slow_session_workflow_log.js b/jstests/noPassthrough/slow_session_workflow_log.js deleted file mode 100644 index bc01f0d381a..00000000000 --- a/jstests/noPassthrough/slow_session_workflow_log.js +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Verifies that the SessionWorkflow provides a slow loop log when appropriate. 
- * @tags: [ - * requires_sharding, - * multiversion_incompatible - * ] - */ -(function() { -'use strict'; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/log.js"); // For findMatchingLogLine - -const expectedLogId = 6983000; -const sleepMillisInQueryFilter = 200; -const sleepMillisBetweenQueries = 100; - -/* -TODO(SERVER-69831): enable when we have slow loop crieteria -const expectedFields = [ - "totalElapsedMillis", - "activeElapsedMillis", - "sourceWorkElapsedMillis", - "processMessageElapsedMillis", - "sendMessageElapsedMillis", - "finalizeElapsedMillis", -]; -*/ - -function runTest(conn) { - // Build a new connection based on the replica set URL - let coll = conn.getCollection("test.foo"); - coll.drop(); - - // TODO(SERVER-69831): remove, and actually test under which conditions the log appears. - configureFailPoint(conn, "alwaysLogSlowSessionWorkflow"); - - assert.commandWorked(assert.commandWorked(coll.insert({_id: 1}))); - - // Do a query that we would expect to be fast. - let count = coll.find({}).toArray(); - assert.eq(count.length, 1, "expected 1 document"); - - // TODO(SERVER-69831): Expect no slow loop logs. - - // This sleep should show up as part of sourceWorkElapsedMillis. - sleep(sleepMillisBetweenQueries); - - // Do a slow query beyond the 100ms threshold. Make sure the slow loop log line exists. - count = - coll.find({$where: 'function() { sleep(' + sleepMillisInQueryFilter + '); return true; }'}) - .toArray(); - assert.eq(count.length, 1, "expected 1 document"); - - let allLogLines = checkLog.getGlobalLog(conn); - var slowLoopLogLine; - assert.soon(() => { - slowLoopLogLine = findMatchingLogLine(allLogLines, {id: expectedLogId}); - return slowLoopLogLine !== null; - }, "Couldn't find slow loop log line"); - - /* - const slowLoopObj = JSON.parse(slowLoopLogLine); - TODO(SERVER-69831): enable when we have a single slow loop log. 
- expectedFields.forEach((expectedField) => { - assert(slowLoopObj.attr[expectedField]); - }); - */ - - // TODO(SERVER-69831): Expect that sourceWorkElapsedMillis and processMessageElapsedMillis are - // each greater than their respective sleeps, and totalElapsedMillis >= their sum. -} - -// Test standalone. -const m = MongoRunner.runMongod(); -runTest(m); -MongoRunner.stopMongod(m); - -// Test sharded. -const st = new ShardingTest({shards: 1, mongos: 1}); -runTest(st.s0); -st.stop(); -})(); diff --git a/jstests/noPassthrough/spill_to_disk_secondary_read.js b/jstests/noPassthrough/spill_to_disk_secondary_read.js index fd9609fe2ae..b9b09104138 100644 --- a/jstests/noPassthrough/spill_to_disk_secondary_read.js +++ b/jstests/noPassthrough/spill_to_disk_secondary_read.js @@ -9,28 +9,36 @@ load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages. load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -const kNumNodes = 3; const replTest = new ReplSetTest({ - nodes: kNumNodes, + nodes: 3, }); replTest.startSet(); replTest.initiate(); +function setLog(db) { + db.setLogLevel(5, 'command'); + db.setLogLevel(5, 'index'); + db.setLogLevel(5, 'query'); + db.setLogLevel(5, 'replication'); + db.setLogLevel(5, 'write'); +} + /** * Setup the primary and secondary collections. */ let primary = replTest.getPrimary(); -let bulk = primary.getDB("test").foo.initializeUnorderedBulkOp(); +setLog(primary.getDB("test")); +const insertColl = primary.getDB("test").foo; const cRecords = 50; for (let i = 0; i < cRecords; ++i) { // We'll be using a unique 'key' field for group & lookup, but we cannot use '_id' for this, // because '_id' is indexed and would trigger Indexed Loop Join instead of Hash Join. 
- bulk.insert({key: i, string: "test test test"}); + assert.commandWorked(insertColl.insert({key: i, string: "test test test"})); } -assert.commandWorked(bulk.execute({w: kNumNodes, wtimeout: 5000})); let secondary = replTest.getSecondary(); +setLog(secondary.getDB("test")); const readColl = secondary.getDB("test").foo; /** diff --git a/jstests/noPassthrough/startup_with_missing_id_index.js b/jstests/noPassthrough/startup_with_missing_id_index.js index 6515cbef8f7..5dbe867f34f 100644 --- a/jstests/noPassthrough/startup_with_missing_id_index.js +++ b/jstests/noPassthrough/startup_with_missing_id_index.js @@ -1,7 +1,6 @@ /** * Tests that the server will startup normally when a collection has a missing id index. * - * @tags: [requires_persistence] */ (function() { "use strict"; diff --git a/jstests/noPassthroughWithMongod/big_predicate.js b/jstests/noPassthroughWithMongod/big_predicate.js index ac643616b5c..8cc991d3d91 100644 --- a/jstests/noPassthroughWithMongod/big_predicate.js +++ b/jstests/noPassthroughWithMongod/big_predicate.js @@ -8,8 +8,7 @@ const coll = db.big_predicate; coll.drop(); let filter = {}; -// TODO SERVER-70110: Revert number of branches to 2500. 
-for (let i = 0; i < 25; ++i) { +for (let i = 0; i < 2500; ++i) { filter["field" + i] = i; } diff --git a/jstests/query_golden/expected_output/extraneous_project b/jstests/query_golden/expected_output/extraneous_project deleted file mode 100644 index bf06bad74e9..00000000000 --- a/jstests/query_golden/expected_output/extraneous_project +++ /dev/null @@ -1,63 +0,0 @@ - - -[jsTest] ---- -[jsTest] Query: [ { "$match" : { "username" : "/^user8/" } }, { "$project" : { "username" : 1 } }, { "$group" : { "_id" : 1, "count" : { "$sum" : 1 } } } ] -[jsTest] ---- - - -nReturned: 0 -Plan skeleton: { - "queryPlanner" : { - "winningPlan" : { - "optimizerPlan" : { - "nodeType" : "Root", - "child" : { - "nodeType" : "Evaluation", - "child" : { - "nodeType" : "GroupBy", - "child" : { - "nodeType" : "Evaluation", - "child" : { - "nodeType" : "Filter", - "child" : { - "nodeType" : "PhysicalScan" - } - } - } - } - } - } - } - } -} - -[jsTest] ---- -[jsTest] Query: [ { "$match" : { "username" : "/^user8/" } }, { "$group" : { "_id" : 1, "count" : { "$sum" : 1 } } } ] -[jsTest] ---- - - -nReturned: 0 -Plan skeleton: { - "queryPlanner" : { - "winningPlan" : { - "optimizerPlan" : { - "nodeType" : "Root", - "child" : { - "nodeType" : "Evaluation", - "child" : { - "nodeType" : "GroupBy", - "child" : { - "nodeType" : "Evaluation", - "child" : { - "nodeType" : "Filter", - "child" : { - "nodeType" : "PhysicalScan" - } - } - } - } - } - } - } - } -} \ No newline at end of file diff --git a/jstests/query_golden/extraneous_project.js b/jstests/query_golden/extraneous_project.js deleted file mode 100644 index 97b31e57f97..00000000000 --- a/jstests/query_golden/extraneous_project.js +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Tests that a $project which does not have an overall effect on the query is optimized out of the - * final plan. - * @tags: [ - * # Checks for 'IndexScan' node in explain. 
- * requires_cqf, - * ] - */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton. - -const coll = db.cqf_extraneous_project; -coll.drop(); -assert.commandWorked(coll.insert([ - {username: "user8", a: 1}, - {username: "user9", a: 1}, - {username: "user8", a: 2}, - {username: "user7", a: 2}, - {username: "user8", a: 3} -])); - -function run(pipeline) { - jsTestLog(`Query: ${tojsononeline(pipeline)}`); - show(coll.aggregate(pipeline)); - const explain = coll.explain("executionStats").aggregate(pipeline); - print(`nReturned: ${explain.executionStats.nReturned}\n`); - print(`Plan skeleton: `); - printjson(getPlanSkeleton(explain)); -} - -run([ - {$match: {username: "/^user8/"}}, - {$project: {username: 1}}, - {$group: {_id: 1, count: {$sum: 1}}} -]); - -run([{$match: {username: "/^user8/"}}, {$group: {_id: 1, count: {$sum: 1}}}]); -})(); diff --git a/jstests/replsets/global_index_ddl_rollback.js b/jstests/replsets/global_index_ddl_rollback.js new file mode 100644 index 00000000000..5243f6d4d3d --- /dev/null +++ b/jstests/replsets/global_index_ddl_rollback.js @@ -0,0 +1,56 @@ +/** + * Tests that global index container ddl operations can be rolled back. + * + * @tags: [ + * featureFlagGlobalIndexes, + * requires_fcv_62, + * requires_replication, + * ] + */ +(function() { +'use strict'; + +load('jstests/replsets/libs/rollback_test.js'); + +function uuidToNss(uuid) { + const [_, uuidString] = uuid.toString().match(/"((?:\\.|[^"\\])*)"/); + return "globalIndexes." + uuidString; +} + +const rollbackTest = new RollbackTest(jsTestName()); + +const primary = rollbackTest.getPrimary(); +const adminDB = primary.getDB("admin"); +const globalIndexCreateUUID = UUID(); +const globalIndexDropUUID = UUID(); + +// Create a global index container to be dropped. 
+assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: globalIndexDropUUID})); + +rollbackTest.transitionToRollbackOperations(); + +// Create a global index container to be rolled back. +assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: globalIndexCreateUUID})); +// Drop a global index container, operation should be rolled back. +assert.commandWorked(adminDB.runCommand({_shardsvrDropGlobalIndex: globalIndexDropUUID})); + +// Perform the rollback. +rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); +rollbackTest.transitionToSyncSourceOperationsDuringRollback(); +rollbackTest.transitionToSteadyStateOperations(); + +rollbackTest.getTestFixture().nodes.forEach(function(node) { + const nodeDB = node.getDB("system"); + + // Check globalIndexCreateUUID creation is rolled back and does not exist. + var res = + nodeDB.runCommand({listCollections: 1, filter: {name: uuidToNss(globalIndexCreateUUID)}}); + assert.eq(res.cursor.firstBatch.length, 0); + + // Check globalIndexDropUUID drop is rolled back and still exists. + res = nodeDB.runCommand({listCollections: 1, filter: {name: uuidToNss(globalIndexDropUUID)}}); + assert.eq(res.cursor.firstBatch.length, 1); +}); + +rollbackTest.stop(); +})(); diff --git a/jstests/replsets/global_index_rollback.js b/jstests/replsets/global_index_rollback.js deleted file mode 100644 index de8a599d9cb..00000000000 --- a/jstests/replsets/global_index_rollback.js +++ /dev/null @@ -1,537 +0,0 @@ -/** - * Tests replication rollback of global index container DDL and CRUD operations. - * Validates the generation of rollback files and efficient restoring of fast-counts. - * - * @tags: [ - * featureFlagGlobalIndexes, - * requires_fcv_62, - * requires_replication, - * ] - */ - -(function() { -'use strict'; - -load('jstests/replsets/libs/rollback_files.js'); -load('jstests/replsets/libs/rollback_test.js'); -load('jstests/libs/uuid_util.js'); - -function uuidToCollName(uuid) { - return "globalIndexes." 
+ extractUUIDFromObject(uuid); -} - -const rollbackTest = new RollbackTest(jsTestName()); - -function rollbackDDLOps() { - const node = rollbackTest.getPrimary(); - const adminDB = node.getDB("admin"); - const globalIndexCreateUUID = UUID(); - const globalIndexDropUUID = UUID(); - jsTestLog("rollbackDDLOps primary=" + node); - - // Create a global index container whose drop won't be majority-committed. - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: globalIndexDropUUID})); - - rollbackTest.transitionToRollbackOperations(); - - // Create a global index container that's not majority-committed. - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: globalIndexCreateUUID})); - // Drop a global index container, the operation is not majority-committed. - assert.commandWorked(adminDB.runCommand({_shardsvrDropGlobalIndex: globalIndexDropUUID})); - - // Perform the rollback. - rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); - rollbackTest.transitionToSyncSourceOperationsDuringRollback(); - rollbackTest.transitionToSteadyStateOperations(); - - // Check globalIndexCreateUUID creation is rolled back and does not exist. - var res = node.getDB("system").runCommand( - {listCollections: 1, filter: {name: uuidToCollName(globalIndexCreateUUID)}}); - assert.eq(res.cursor.firstBatch.length, 0); - - // Check globalIndexDropUUID drop is rolled back and still exists. - res = node.getDB("system").runCommand( - {listCollections: 1, filter: {name: uuidToCollName(globalIndexDropUUID)}}); - assert.eq(res.cursor.firstBatch.length, 1); - - // Log calls out that the two commands have been rolled back. 
- assert(checkLog.checkContainsWithCountJson( - node, - 21656, - { - "oplogEntry": { - "op": "c", - "ns": "system.$cmd", - "ui": {"$uuid": extractUUIDFromObject(globalIndexDropUUID)}, - "o": {"dropGlobalIndex": uuidToCollName(globalIndexDropUUID)}, - "o2": {"numRecords": 0} - } - }, - 1, - null, - true /*isRelaxed*/)); - assert(checkLog.checkContainsWithCountJson( - node, - 21656, - { - "oplogEntry": { - "op": "c", - "ns": "system.$cmd", - "ui": {"$uuid": extractUUIDFromObject(globalIndexCreateUUID)}, - "o": {"createGlobalIndex": uuidToCollName(globalIndexCreateUUID)} - } - }, - 1, - null, - true /*isRelaxed*/)); -} - -// Rollback a single index key insert. -function rollbackSingleKeyInsert(bulk) { - const node = rollbackTest.getPrimary(); - const adminDB = node.getDB("admin"); - const uuid = UUID(); - jsTestLog("rollbackSingleKeyInsert uuid=" + uuid + ", bulk=" + bulk, ", primary=" + node); - - const collName = uuidToCollName(uuid); - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: uuid})); - assert.eq(0, node.getDB("system").getCollection(collName).find().itcount()); - - const keyMajorityCommitted = {key: {a: 0}, docKey: {sk: 0, _id: 0}}; - const keyToRollback = {key: {a: 1}, docKey: {sk: 1, _id: 1}}; - - // Insert a key, majority-committed. - { - const session = node.startSession(); - session.startTransaction(); - assert.commandWorked(session.getDatabase("system").runCommand( - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyMajorityCommitted))); - session.commitTransaction(); - session.endSession(); - } - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - - // Then insert a key that's not majority-committed. 
- rollbackTest.transitionToRollbackOperations(); - { - const session = node.startSession(); - session.startTransaction(); - const stmts = [Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback)]; - if (bulk) { - assert.commandWorked(session.getDatabase("system").runCommand( - {"_shardsvrWriteGlobalIndexKeys": 1, ops: stmts})); - } else { - for (let stmt of stmts) { - assert.commandWorked(session.getDatabase("system").runCommand(stmt)); - } - } - session.commitTransaction(); - session.endSession(); - } - assert.eq(2, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback["docKey"]}) - .itcount()); - - // Perform the rollback. - rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); - rollbackTest.transitionToSyncSourceOperationsDuringRollback(); - rollbackTest.transitionToSteadyStateOperations(); - - // Only the majority-committed key is left. - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - assert.eq(0, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback["docKey"]}) - .itcount()); - - // Log calls out that the index key insert has been rolled back. - assert( - checkLog.checkContainsWithCountJson(node, - 6984700, - {"insertGlobalIndexKey": 1, "deleteGlobalIndexKey": 0}, - 1, - null, - true /*isRelaxed*/)); - - // The rollback wrote the rolled-back index key insert to a file. - const replTest = rollbackTest.getTestFixture(); - const expectedEntries = [Object.extend({_id: keyToRollback.docKey}, - {"ik": BinData(0, "KwIE"), "tb": BinData(0, "AQ==")})]; - checkRollbackFiles(replTest.getDbPath(node), "system." 
+ collName, uuid, expectedEntries); -} - -// Rollback a single index key delete. -function rollbackSingleKeyDelete(bulk) { - const node = rollbackTest.getPrimary(); - const adminDB = node.getDB("admin"); - const uuid = UUID(); - jsTestLog("rollbackSingleKeyDelete uuid=" + uuid + ", bulk=" + bulk, ", primary=" + node); - - const collName = uuidToCollName(uuid); - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: uuid})); - assert.eq(0, node.getDB("system").getCollection(collName).find().itcount()); - - const key = {key: {a: 0}, docKey: {sk: 0, _id: 0}}; - - // Insert a key, majority-committed. - { - const session = node.startSession(); - session.startTransaction(); - assert.commandWorked(session.getDatabase("system").runCommand( - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, key))); - session.commitTransaction(); - session.endSession(); - } - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, node.getDB("system").getCollection(collName).find({_id: key["docKey"]}).itcount()); - - // Then delete the key, not majority-committed. - rollbackTest.transitionToRollbackOperations(); - { - const session = node.startSession(); - session.startTransaction(); - const stmts = [Object.extend({"_shardsvrDeleteGlobalIndexKey": uuid}, key)]; - if (bulk) { - assert.commandWorked(session.getDatabase("system").runCommand( - {"_shardsvrWriteGlobalIndexKeys": 1, ops: stmts})); - } else { - for (let stmt of stmts) { - assert.commandWorked(session.getDatabase("system").runCommand(stmt)); - } - } - session.commitTransaction(); - session.endSession(); - } - assert.eq(0, node.getDB("system").getCollection(collName).find().itcount()); - - // Perform the rollback. - rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); - rollbackTest.transitionToSyncSourceOperationsDuringRollback(); - rollbackTest.transitionToSteadyStateOperations(); - - // The key is still present, as its delete wasn't majority-committed. 
- assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, node.getDB("system").getCollection(collName).find({_id: key["docKey"]}).itcount()); - - // Log calls out that the index key delete has been rolled back. - assert( - checkLog.checkContainsWithCountJson(node, - 6984700, - {"insertGlobalIndexKey": 0, "deleteGlobalIndexKey": 1}, - 1, - null, - true /*isRelaxed*/)); -} - -function rollbackOneKeyInsertTwoKeyDeletes(bulk) { - const node = rollbackTest.getPrimary(); - const adminDB = node.getDB("admin"); - const uuid = UUID(); - jsTestLog("rollbackOneKeyInsertTwoKeyDeletes uuid=" + uuid + ", bulk=" + bulk, - ", primary=" + node); - - const collName = uuidToCollName(uuid); - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: uuid})); - assert.eq(0, node.getDB("system").getCollection(collName).find().itcount()); - - const keyMajorityCommitted = {key: {a: 0}, docKey: {sk: 0, _id: 0}}; - const keyToRollback0 = {key: {a: 1}, docKey: {sk: 1, _id: 1}}; - const keyToRollback1 = {key: {a: 2}, docKey: {sk: 2, _id: 2}}; - - // Insert a key, majority-committed. - { - const session = node.startSession(); - session.startTransaction(); - assert.commandWorked(session.getDatabase("system").runCommand( - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyMajorityCommitted))); - session.commitTransaction(); - session.endSession(); - } - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - - // Then delete the key and insert two more keys. All these writes are not majority-committed. 
- rollbackTest.transitionToRollbackOperations(); - { - const session = node.startSession(); - session.startTransaction(); - const stmts = [ - Object.extend({"_shardsvrDeleteGlobalIndexKey": uuid}, keyMajorityCommitted), - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback0), - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback1) - ]; - if (bulk) { - assert.commandWorked(session.getDatabase("system").runCommand( - {"_shardsvrWriteGlobalIndexKeys": 1, ops: stmts})); - } else { - for (let stmt of stmts) { - assert.commandWorked(session.getDatabase("system").runCommand(stmt)); - } - } - session.commitTransaction(); - session.endSession(); - } - assert.eq(2, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback0["docKey"]}) - .itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback1["docKey"]}) - .itcount()); - - // Perform the rollback. - rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); - rollbackTest.transitionToSyncSourceOperationsDuringRollback(); - rollbackTest.transitionToSteadyStateOperations(); - - // The only key that's present is the majority-committed one. - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - - // Log calls out that two index key inserts and one key delete have been rolled back. - assert( - checkLog.checkContainsWithCountJson(node, - 6984700, - {"insertGlobalIndexKey": 2, "deleteGlobalIndexKey": 1}, - 1, - null, - true /*isRelaxed*/)); - - // The rollback wrote the two rolled-back index key inserts to a file. 
- const replTest = rollbackTest.getTestFixture(); - const expectedEntries = [ - Object.extend({_id: keyToRollback1.docKey}, - {"ik": BinData(0, "KwQE"), "tb": BinData(0, "AQ==")}), - Object.extend({_id: keyToRollback0.docKey}, - {"ik": BinData(0, "KwIE"), "tb": BinData(0, "AQ==")}), - ]; - checkRollbackFiles(replTest.getDbPath(node), "system." + collName, uuid, expectedEntries); -} - -function rollbackCreateWithCrud(bulk) { - const node = rollbackTest.getPrimary(); - const adminDB = node.getDB("admin"); - const uuid = UUID(); - jsTestLog("rollbackCreateWithCrud uuid=" + uuid + ", bulk=" + bulk, ", primary=" + node); - - const collName = uuidToCollName(uuid); - - const keyToRollback0 = {key: {a: 1}, docKey: {sk: 1, _id: 1}}; - const keyToRollback1 = {key: {a: 2}, docKey: {sk: 2, _id: 2}}; - const keyToRollback2 = {key: {a: 3}, docKey: {sk: 3, _id: 3}}; - - // Create a container and insert keys to it. All these operations are not majority-committed. - rollbackTest.transitionToRollbackOperations(); - { - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: uuid})); - - const session = node.startSession(); - session.startTransaction(); - const stmts = [ - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback0), - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback1), - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback2), - Object.extend({"_shardsvrDeleteGlobalIndexKey": uuid}, keyToRollback1), - ]; - if (bulk) { - assert.commandWorked(session.getDatabase("system").runCommand( - {"_shardsvrWriteGlobalIndexKeys": 1, ops: stmts})); - } else { - for (let stmt of stmts) { - assert.commandWorked(session.getDatabase("system").runCommand(stmt)); - } - } - session.commitTransaction(); - session.endSession(); - } - assert.eq(2, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback0["docKey"]}) - .itcount()); - 
assert.eq(0, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback1["docKey"]}) - .itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyToRollback2["docKey"]}) - .itcount()); - - // Perform the rollback. - rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); - rollbackTest.transitionToSyncSourceOperationsDuringRollback(); - rollbackTest.transitionToSteadyStateOperations(); - - // The global index container doesn't exist. - const container = - node.getDB("system").runCommand({listCollections: 1, filter: {name: collName}}); - assert.eq(container.cursor.firstBatch.length, 0); - - // Log calls out that three index key inserts and one key delete have been rolled back. - assert( - checkLog.checkContainsWithCountJson(node, - 6984700, - {"insertGlobalIndexKey": 3, "deleteGlobalIndexKey": 1}, - 1, - null, - true /*isRelaxed*/)); - - // The rollback wrote the two rolled-back index key inserts to a file. - const replTest = rollbackTest.getTestFixture(); - const expectedEntries = [ - Object.extend({_id: keyToRollback2.docKey}, - {"ik": BinData(0, "KwYE"), "tb": BinData(0, "AQ==")}), - Object.extend({_id: keyToRollback0.docKey}, - {"ik": BinData(0, "KwIE"), "tb": BinData(0, "AQ==")}), - ]; - checkRollbackFiles(replTest.getDbPath(node), "system." + collName, uuid, expectedEntries); -} - -function rollbackDropWithCrud(bulk) { - const node = rollbackTest.getPrimary(); - const adminDB = node.getDB("admin"); - const uuid = UUID(); - jsTestLog("rollbackDropWithCrud uuid=" + uuid + ", bulk=" + bulk, ", primary=" + node); - - const collName = uuidToCollName(uuid); - - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: uuid})); - const keyMajorityCommitted = {key: {a: 0}, docKey: {sk: 0, _id: 0}}; - - // Insert a key that will be majority-committed. 
- { - const session = node.startSession(); - session.startTransaction(); - assert.commandWorked(session.getDatabase("system").runCommand( - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyMajorityCommitted))); - session.commitTransaction(); - session.endSession(); - } - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - - const keyToRollback0 = {key: {a: 1}, docKey: {sk: 1, _id: 1}}; - const keyToRollback1 = {key: {a: 2}, docKey: {sk: 2, _id: 2}}; - const keyToRollback2 = {key: {a: 3}, docKey: {sk: 3, _id: 3}}; - - // Write to the container and drop it. All these operations are not majority-committed. - rollbackTest.transitionToRollbackOperations(); - { - assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: uuid})); - - const session = node.startSession(); - session.startTransaction(); - const stmts = [ - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback1), - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback2), - Object.extend({"_shardsvrDeleteGlobalIndexKey": uuid}, keyToRollback1), - Object.extend({"_shardsvrInsertGlobalIndexKey": uuid}, keyToRollback0), - Object.extend({"_shardsvrDeleteGlobalIndexKey": uuid}, keyToRollback2), - ]; - if (bulk) { - assert.commandWorked(session.getDatabase("system").runCommand( - {"_shardsvrWriteGlobalIndexKeys": 1, ops: stmts})); - } else { - for (let stmt of stmts) { - assert.commandWorked(session.getDatabase("system").runCommand(stmt)); - } - } - session.commitTransaction(); - session.endSession(); - assert.commandWorked(adminDB.runCommand({_shardsvrDropGlobalIndex: uuid})); - } - // The global index container doesn't exist. 
- const containerBeforeRollback = - node.getDB("system").runCommand({listCollections: 1, filter: {name: collName}}); - assert.eq(containerBeforeRollback.cursor.firstBatch.length, 0); - - // Perform the rollback. - rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); - rollbackTest.transitionToSyncSourceOperationsDuringRollback(); - rollbackTest.transitionToSteadyStateOperations(); - - // The global index exists, with the single majority-committed key. - const containerAfterRollback = - node.getDB("system").runCommand({listCollections: 1, filter: {name: collName}}); - assert.eq(containerAfterRollback.cursor.firstBatch.length, 1); - - assert.eq(1, node.getDB("system").getCollection(collName).find().itcount()); - assert.eq(1, - node.getDB("system") - .getCollection(collName) - .find({_id: keyMajorityCommitted["docKey"]}) - .itcount()); - - // Log calls out that three index key inserts and two key deletes have been rolled back. - assert( - checkLog.checkContainsWithCountJson(node, - 6984700, - {"insertGlobalIndexKey": 3, "deleteGlobalIndexKey": 2}, - 1, - null, - true /*isRelaxed*/)); - - // We've reset the original fast count rather than doing an expensive collection scan. - assert(checkLog.checkContainsWithCountJson(node, 21602, undefined, 0)); - - // Log states that we're not going to write a rollback file for a collection whose drop was - // rolled back. 
- assert(checkLog.checkContainsWithCountJson( - node, - 21608, - {"uuid": {"uuid": {"$uuid": extractUUIDFromObject(uuid)}}}, - 1, - null, - true /*isRelaxed*/)); -} - -rollbackDDLOps(); -for (let bulk of [false, true]) { - rollbackSingleKeyInsert(bulk); - rollbackSingleKeyDelete(bulk); - rollbackOneKeyInsertTwoKeyDeletes(bulk); - rollbackCreateWithCrud(bulk); - rollbackDropWithCrud(bulk); -} - -rollbackTest.stop(); -})(); diff --git a/jstests/replsets/libs/tenant_migration_test.js b/jstests/replsets/libs/tenant_migration_test.js index bd79c51b6bb..04619679e33 100644 --- a/jstests/replsets/libs/tenant_migration_test.js +++ b/jstests/replsets/libs/tenant_migration_test.js @@ -136,7 +136,7 @@ function TenantMigrationTest({ nodeOptions["setParameter"] = setParameterOpts; const rstName = `${name}_${(isDonor ? "donor" : "recipient")}`; - const rst = new ReplSetTest({name: rstName, nodes, serverless: true, nodeOptions}); + const rst = new ReplSetTest({name: rstName, nodes, nodeOptions}); rst.startSet(); if (initiateRstWithHighElectionTimeout) { rst.initiateWithHighElectionTimeout(); @@ -231,7 +231,6 @@ function TenantMigrationTest({ this.runDonorStartMigration = function({ migrationIdString, tenantId, - protocol, recipientConnectionString = recipientRst.getURL(), readPreference = {mode: "primary"}, donorCertificateForRecipient = migrationCertificates.donorCertificateForRecipient, @@ -252,7 +251,6 @@ function TenantMigrationTest({ readPreference, donorCertificateForRecipient, recipientCertificateForDonor, - protocol }; const stateRes = TenantMigrationUtil.runTenantMigrationCommand(cmdObj, this.getDonorRst(), { diff --git a/jstests/replsets/libs/tenant_migration_util.js b/jstests/replsets/libs/tenant_migration_util.js index 381dc3855bd..d0e9354f1c1 100644 --- a/jstests/replsets/libs/tenant_migration_util.js +++ b/jstests/replsets/libs/tenant_migration_util.js @@ -262,17 +262,6 @@ var TenantMigrationUtil = (function() { return res; } - const ServerlessLockType = - {None: 0, 
ShardSplitDonor: 1, TenantMigrationDonor: 2, TenantMigrationRecipient: 3}; - - /** - * Return the active serverless operation lock, if one is acquired. - */ - function getServerlessOperationLock(node) { - return assert.commandWorked(node.adminCommand({serverStatus: 1, serverless: 1})) - .serverless.operationLock; - } - /** * Returns the TenantMigrationAccessBlocker serverStatus output for the multi-tenant migration * or shard merge for the given node. @@ -572,8 +561,6 @@ var TenantMigrationUtil = (function() { makeMigrationCertificatesForTest, makeX509OptionsForTest, isMigrationCompleted, - ServerlessLockType, - getServerlessOperationLock, getTenantMigrationAccessBlocker, getTenantMigrationAccessBlockers, getNumBlockedReads, diff --git a/jstests/replsets/shardsvr_global_index_crud_rollback.js b/jstests/replsets/shardsvr_global_index_crud_rollback.js new file mode 100644 index 00000000000..7e4349c8945 --- /dev/null +++ b/jstests/replsets/shardsvr_global_index_crud_rollback.js @@ -0,0 +1,87 @@ +/** + * Tests that global index key insert and delete are properly rolled back. + * + * @tags: [ + * featureFlagGlobalIndexes, + * requires_fcv_62, + * requires_replication, + * ] + */ +(function() { +'use strict'; + +load('jstests/replsets/libs/rollback_test.js'); + +const rollbackTest = new RollbackTest(jsTestName()); + +const primary = rollbackTest.getPrimary(); +const adminDB = primary.getDB("admin"); +const globalIndexUUID = UUID(); +const [_, uuidString] = globalIndexUUID.toString().match(/"((?:\\.|[^"\\])*)"/); +const collName = "globalIndexes." + uuidString; + +assert.commandWorked(adminDB.runCommand({_shardsvrCreateGlobalIndex: globalIndexUUID})); + +// We start on a clean slate: the global index container is empty. +assert.eq(0, primary.getDB("system").getCollection(collName).find().itcount()); + +const docKeyToInsert = { + sk: 1, + _id: 1 +}; +const docKeyToDelete = { + sk: 1, + _id: 5 +}; +// Add key to delete during rollback ops phase. 
+{ + const session = primary.startSession(); + session.startTransaction(); + assert.commandWorked(session.getDatabase("system").runCommand( + {"_shardsvrInsertGlobalIndexKey": globalIndexUUID, key: {a: 5}, docKey: docKeyToDelete})); + session.commitTransaction(); + session.endSession(); +} + +rollbackTest.transitionToRollbackOperations(); + +// Insert an index key to be rolled back. +{ + const session = primary.startSession(); + session.startTransaction(); + assert.commandWorked(session.getDatabase("system").runCommand( + {"_shardsvrInsertGlobalIndexKey": globalIndexUUID, key: {a: 1}, docKey: docKeyToInsert})); + assert.commandWorked(session.getDatabase("system").runCommand( + {"_shardsvrDeleteGlobalIndexKey": globalIndexUUID, key: {a: 5}, docKey: docKeyToDelete})); + session.commitTransaction(); + session.endSession(); +} + +// The inserted index key is present on the primary. +assert.eq(1, primary.getDB("system").getCollection(collName).find({_id: docKeyToInsert}).itcount()); +// The deleted index key is not present on the primary. +assert.eq(0, primary.getDB("system").getCollection(collName).find({_id: docKeyToDelete}).itcount()); + +// Perform the rollback. +rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); +rollbackTest.transitionToSyncSourceOperationsDuringRollback(); +rollbackTest.transitionToSteadyStateOperations(); + +// Verify both global index key insert and delete have been rolled back. The container has exactly +// one entry, and is the one inserted before transitionToRollbackOperations. +rollbackTest.getTestFixture().nodes.forEach(function(node) { + const nodeDB = node.getDB("system"); + const found = nodeDB.getCollection(collName).find(); + const elArr = found.toArray(); + assert.eq(1, elArr.length); + assert.eq(elArr[0]["_id"], docKeyToDelete); +}); + +// TODO (SERVER-69847): fast count is not updated properly for global index CRUD ops after rollback. 
+// Current test implementation makes fastcount valid due to rolling back both a delete and an +// insert. After fixing fast count, we should make this test fail if fast count is not working +// properly. +// TODO (SERVER-69847): add a rollback test for _shardsvrWriteGlobalIndexKeys too. + +rollbackTest.stop(); +})(); diff --git a/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js b/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js index 8afc2d2fcf9..85429af3ddd 100644 --- a/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js +++ b/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js @@ -5,7 +5,6 @@ * @tags: [ * incompatible_with_macos, * incompatible_with_windows_tls, - * requires_fcv_62, * requires_majority_read_concern, * requires_persistence, * serverless, @@ -20,7 +19,6 @@ load("jstests/libs/uuid_util.js"); load("jstests/libs/parallelTester.js"); load("jstests/libs/write_concern_util.js"); load("jstests/replsets/libs/tenant_migration_test.js"); -const {ServerlessLockType, getServerlessOperationLock} = TenantMigrationUtil; const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); @@ -49,7 +47,6 @@ const migrationOpts = { migrationIdString: extractUUIDFromObject(UUID()), tenantId: kTenantId }; - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); // We must wait for the migration to have finished replicating the recipient keys on the donor set // before starting initial sync, otherwise the migration will hang while waiting for initial sync to @@ -86,8 +83,7 @@ donorRst.awaitReplication(); stopServerReplication(initialSyncNode); let configDonorsColl = initialSyncNode.getCollection(TenantMigrationTest.kConfigDonorsNS); -assert.lte(configDonorsColl.count(), 1); -let donorDoc = configDonorsColl.findOne(); +let donorDoc = configDonorsColl.findOne({tenantId: kTenantId}); if (donorDoc) { jsTestLog("Initial sync completed while migration was in state: " + donorDoc.state); switch 
(donorDoc.state) { @@ -152,13 +148,6 @@ if (donorDoc) { } } -const activeServerlessLock = getServerlessOperationLock(initialSyncNode); -if (donorDoc && !donorDoc.expireAt) { - assert.eq(activeServerlessLock, ServerlessLockType.TenantMigrationDonor); -} else { - assert.eq(activeServerlessLock, ServerlessLockType.None); -} - if (fp) { fp.off(); } diff --git a/jstests/replsets/tenant_migration_donor_startup_recovery.js b/jstests/replsets/tenant_migration_donor_startup_recovery.js index 7dbbcb56f8c..7a564e416a5 100644 --- a/jstests/replsets/tenant_migration_donor_startup_recovery.js +++ b/jstests/replsets/tenant_migration_donor_startup_recovery.js @@ -8,7 +8,6 @@ * incompatible_with_macos, * incompatible_with_shard_merge, * incompatible_with_windows_tls, - * requires_fcv_62, * requires_majority_read_concern, * requires_persistence, * serverless, @@ -21,7 +20,6 @@ load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); load("jstests/replsets/libs/tenant_migration_test.js"); -const {ServerlessLockType, getServerlessOperationLock} = TenantMigrationUtil; const donorRst = new ReplSetTest({ nodes: 1, @@ -72,8 +70,7 @@ donorRst.startSet({ donorPrimary = donorRst.getPrimary(); const configDonorsColl = donorPrimary.getCollection(TenantMigrationTest.kConfigDonorsNS); -assert.lte(configDonorsColl.count(), 1); -const donorDoc = configDonorsColl.findOne(); +const donorDoc = configDonorsColl.findOne({tenantId: kTenantId}); if (donorDoc) { switch (donorDoc.state) { case TenantMigrationTest.DonorState.kAbortingIndexBuilds: @@ -136,13 +133,6 @@ if (donorDoc) { } } -const activeServerlessLock = getServerlessOperationLock(donorPrimary); -if (donorDoc && !donorDoc.expireAt) { - assert.eq(activeServerlessLock, ServerlessLockType.TenantMigrationDonor); -} else { - assert.eq(activeServerlessLock, ServerlessLockType.None); -} - tenantMigrationTest.stop(); donorRst.stopSet(); })(); diff --git a/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js 
b/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js index 1012be2670c..404bf0fa765 100644 --- a/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js +++ b/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js @@ -6,7 +6,6 @@ * incompatible_with_macos, * incompatible_with_shard_merge, * incompatible_with_windows_tls, - * requires_fcv_62, * requires_majority_read_concern, * requires_persistence, * serverless, @@ -20,7 +19,6 @@ load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); load("jstests/libs/write_concern_util.js"); load("jstests/replsets/libs/tenant_migration_test.js"); -const {ServerlessLockType, getServerlessOperationLock} = TenantMigrationUtil; const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); @@ -61,8 +59,7 @@ recipientRst.awaitReplication(); stopServerReplication(initialSyncNode); const configRecipientsColl = initialSyncNode.getCollection(TenantMigrationTest.kConfigRecipientsNS); -assert.lte(configRecipientsColl.count(), 1); -const recipientDoc = configRecipientsColl.findOne(); +const recipientDoc = configRecipientsColl.findOne({tenantId: kTenantId}); if (recipientDoc) { switch (recipientDoc.state) { case TenantMigrationTest.RecipientState.kStarted: @@ -100,13 +97,6 @@ if (recipientDoc) { } } -const activeServerlessLock = getServerlessOperationLock(initialSyncNode); -if (recipientDoc && !recipientDoc.expireAt) { - assert.eq(activeServerlessLock, ServerlessLockType.TenantMigrationRecipient); -} else { - assert.eq(activeServerlessLock, ServerlessLockType.None); -} - restartServerReplication(initialSyncNode); tenantMigrationTest.stop(); diff --git a/jstests/serverless/libs/basic_serverless_test.js b/jstests/serverless/libs/basic_serverless_test.js index 727827429db..69aeab56616 100644 --- a/jstests/serverless/libs/basic_serverless_test.js +++ b/jstests/serverless/libs/basic_serverless_test.js @@ -41,45 +41,6 @@ const runCommitSplitThreadWrapper = 
function(rstArgs, donorRst, commitShardSplitCmdObj, retryOnRetryableErrors, enableDonorStartMigrationFsync); }; -/* - * Wait for state document garbage collection by polling for when the document has been removed - * from the 'shardSplitDonors' namespace, and all access blockers have been removed. - * @param {migrationId} id that was used for the commitShardSplit command. - * @param {tenantIds} tenant ids of the shard split. - */ -const waitForGarbageCollectionForSplit = function(donorNodes, migrationId, tenantIds) { - jsTestLog("Wait for garbage collection"); - assert.soon(() => donorNodes.every(node => { - const donorDocumentDeleted = - node.getCollection(BasicServerlessTest.kConfigSplitDonorsNS).count({ - _id: migrationId - }) === 0; - const allAccessBlockersRemoved = tenantIds.every( - id => BasicServerlessTest.getTenantMigrationAccessBlocker({node, id}) == null); - - const result = donorDocumentDeleted && allAccessBlockersRemoved; - if (!result) { - const status = []; - if (!donorDocumentDeleted) { - status.push(`donor document to be deleted (docCount=${ - node.getCollection(BasicServerlessTest.kConfigSplitDonorsNS).count({ - _id: migrationId - })})`); - } - - if (!allAccessBlockersRemoved) { - const tenantsWithBlockers = tenantIds.filter( - id => BasicServerlessTest.getTenantMigrationAccessBlocker({node, id}) != null); - status.push(`access blockers to be removed (${tenantsWithBlockers})`); - } - } - return donorDocumentDeleted && allAccessBlockersRemoved; - }), - "tenant access blockers weren't removed", - 60 * 1000, - 1 * 1000); -}; - const runShardSplitCommand = function( replicaSet, cmdObj, retryOnRetryableErrors, enableDonorStartMigrationFsync) { let res; @@ -394,7 +355,38 @@ class BasicServerlessTest { * @param {tenantIds} tenant ids of the shard split. 
*/ waitForGarbageCollection(migrationId, tenantIds) { - return waitForGarbageCollectionForSplit(this.donor.nodes, migrationId, tenantIds); + jsTestLog("Wait for garbage collection"); + const donorNodes = this.donor.nodes; + assert.soon(() => donorNodes.every(node => { + const donorDocumentDeleted = + node.getCollection(BasicServerlessTest.kConfigSplitDonorsNS).count({ + _id: migrationId + }) === 0; + const allAccessBlockersRemoved = tenantIds.every( + id => BasicServerlessTest.getTenantMigrationAccessBlocker({node, id}) == null); + + const result = donorDocumentDeleted && allAccessBlockersRemoved; + if (!result) { + const status = []; + if (!donorDocumentDeleted) { + status.push(`donor document to be deleted (docCount=${ + node.getCollection(BasicServerlessTest.kConfigSplitDonorsNS).count({ + _id: migrationId + })})`); + } + + if (!allAccessBlockersRemoved) { + const tenantsWithBlockers = + tenantIds.filter(id => BasicServerlessTest.getTenantMigrationAccessBlocker( + {node, id}) != null); + status.push(`access blockers to be removed (${tenantsWithBlockers})`); + } + } + return donorDocumentDeleted && allAccessBlockersRemoved; + }), + "tenant access blockers weren't removed", + 60 * 1000, + 1 * 1000); } /** diff --git a/jstests/serverless/libs/serverless_reject_multiple_ops_utils.js b/jstests/serverless/libs/serverless_reject_multiple_ops_utils.js deleted file mode 100644 index ca79b44778e..00000000000 --- a/jstests/serverless/libs/serverless_reject_multiple_ops_utils.js +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Utility functions for serverless_reject_multiple_ops tests - * - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/rslib.js"); -load("jstests/libs/parallelTester.js"); - -function waitForMergeToComplete(migrationOpts, migrationId, test) { - // Assert that the migration has already been started. 
- assert(test.getDonorPrimary().getCollection(TenantMigrationTest.kConfigDonorsNS).findOne({ - _id: migrationId - })); - - const donorStartReply = test.runDonorStartMigration( - migrationOpts, {waitForMigrationToComplete: true, retryOnRetryableErrors: false}); - - return donorStartReply; -} - -function commitSplitAsync(rst, tenantIds, recipientTagName, recipientSetName, migrationId) { - jsTestLog("Running commitAsync command"); - - const rstArgs = createRstArgs(rst); - const migrationIdString = extractUUIDFromObject(migrationId); - - const thread = new Thread(runCommitSplitThreadWrapper, - rstArgs, - migrationIdString, - tenantIds, - recipientTagName, - recipientSetName, - false /* enableDonorStartMigrationFsync */); - thread.start(); - - return thread; -} - -function addRecipientNodes(rst, recipientTagName) { - const numNodes = 3; // default to three nodes - let recipientNodes = []; - const options = TenantMigrationUtil.makeX509OptionsForTest(); - jsTestLog(`Adding ${numNodes} non-voting recipient nodes to donor`); - for (let i = 0; i < numNodes; ++i) { - recipientNodes.push(rst.add(options.donor)); - } - - const primary = rst.getPrimary(); - const admin = primary.getDB('admin'); - const config = rst.getReplSetConfigFromNode(); - config.version++; - - // ensure recipient nodes are added as non-voting members - recipientNodes.forEach(node => { - config.members.push({ - host: node.host, - votes: 0, - priority: 0, - hidden: true, - tags: {[recipientTagName]: ObjectId().valueOf()} - }); - }); - - // reindex all members from 0 - config.members = config.members.map((member, idx) => { - member._id = idx; - return member; - }); - - assert.commandWorked(admin.runCommand({replSetReconfig: config})); - recipientNodes.forEach(node => rst.waitForState(node, ReplSetTest.State.SECONDARY)); - - return recipientNodes; -} diff --git a/jstests/serverless/list_databases_for_all_tenants.js b/jstests/serverless/list_databases_for_all_tenants.js index c0e3b3cc9bc..98f87d794fe 100644 --- 
a/jstests/serverless/list_databases_for_all_tenants.js +++ b/jstests/serverless/list_databases_for_all_tenants.js @@ -37,12 +37,13 @@ function verifyNameOnly(listDatabasesOut) { // creates 'num' databases on 'conn', each belonging to a different tenant function createMultitenantDatabases(conn, tokenConn, num) { + let kTenant; let tenantIds = []; let expectedDatabases = []; for (let i = 0; i < num; i++) { // Randomly generate a tenantId - let kTenant = ObjectId(); + kTenant = ObjectId(); tenantIds.push(kTenant.str); // Create a user for kTenant and then set the security token on the connection. @@ -134,18 +135,10 @@ function runTestCheckCmdOptions(mongod, tenantIds) { assert.eq(2, cmdRes.databases.length); verifySizeSum(cmdRes); - // Now return the system admin database and tenants' admin databases. + // Now return only the admin database. cmdRes = assert.commandWorked( adminDB.runCommand({listDatabasesForAllTenants: 1, filter: {name: "admin"}})); - assert.eq(1 + tenantIds.length, cmdRes.databases.length, tojson(cmdRes.databases)); - verifySizeSum(cmdRes); - - // Now return only one tenant admin database. - cmdRes = assert.commandWorked(adminDB.runCommand({ - listDatabasesForAllTenants: 1, - filter: {name: "admin", tenantId: ObjectId(tenantIds[2])} - })); - assert.eq(1, cmdRes.databases.length, tojson(cmdRes.databases)); + assert.eq(1, cmdRes.databases.length); verifySizeSum(cmdRes); // Now return only the names. diff --git a/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js b/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js index b4a418b33a4..6123695404f 100644 --- a/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js +++ b/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js @@ -5,16 +5,9 @@ load('jstests/aggregation/extras/utils.js'); // For arrayEq() -// TODO SERVER-69726 Make this replica set have multiple nodes. 
-const rst = new ReplSetTest({ - nodes: 1, - nodeOptions: {auth: '', setParameter: {multitenancySupport: true, featureFlagMongoStore: true}} -}); -rst.startSet({keyFile: 'jstests/libs/key1'}); -rst.initiate(); - -const primary = rst.getPrimary(); -const adminDb = primary.getDB('admin'); +const mongod = MongoRunner.runMongod( + {auth: '', setParameter: {multitenancySupport: true, featureFlagMongoStore: true}}); +const adminDb = mongod.getDB('admin'); // Prepare a user for testing pass tenant via $tenant. // Must be authenticated as a user with ActionType::useTenant in order to use $tenant. @@ -25,7 +18,7 @@ const kTenant = ObjectId(); const kOtherTenant = ObjectId(); const kDbName = 'myDb'; const kCollName = 'myColl'; -const testDb = primary.getDB(kDbName); +const testDb = mongod.getDB(kDbName); const testColl = testDb.getCollection(kCollName); // In this jstest, the collection (defined by kCollName) and the document "{_id: 0, a: 1, b: 1}" @@ -238,35 +231,6 @@ const testColl = testDb.getCollection(kCollName); {insert: kCollName, documents: [{_id: 0, a: 1, b: 1}], '$tenant': kTenant})); } -// Test that transactions can be run successfully. -{ - const lsid = assert.commandWorked(testDb.runCommand({startSession: 1, $tenant: kTenant})).id; - assert.commandWorked(testDb.runCommand({ - delete: kCollName, - deletes: [{q: {_id: 0, a: 1, b: 1}, limit: 1}], - startTransaction: true, - lsid: lsid, - txnNumber: NumberLong(0), - autocommit: false, - '$tenant': kTenant - })); - assert.commandWorked(testDb.adminCommand({ - commitTransaction: 1, - lsid: lsid, - txnNumber: NumberLong(0), - autocommit: false, - $tenant: kTenant - })); - - const findRes = assert.commandWorked(testDb.runCommand({find: kCollName, '$tenant': kTenant})); - assert.eq(0, findRes.cursor.firstBatch.length, tojson(findRes.cursor.firstBatch)); - - // Reset the collection so other test cases can still access this collection with kCollName - // after this test. 
- assert.commandWorked(testDb.runCommand( - {insert: kCollName, documents: [{_id: 0, a: 1, b: 1}], '$tenant': kTenant})); -} - // Test createIndexes, listIndexes and dropIndexes command. { var sortIndexesByName = function(indexes) { @@ -316,5 +280,5 @@ const testColl = testDb.getCollection(kCollName); assert(arrayEq([{key: {"_id": 1}, name: "_id_"}], getIndexesKeyAndName(res.cursor.firstBatch))); } -rst.stopSet(); +MongoRunner.stopMongod(mongod); })(); diff --git a/jstests/serverless/native_tenant_data_isolation_basic_security_token.js b/jstests/serverless/native_tenant_data_isolation_basic_security_token.js index 68805207388..9add0073984 100644 --- a/jstests/serverless/native_tenant_data_isolation_basic_security_token.js +++ b/jstests/serverless/native_tenant_data_isolation_basic_security_token.js @@ -5,16 +5,9 @@ load('jstests/aggregation/extras/utils.js'); // For arrayEq() -// TODO SERVER-69726 Make this replica set have multiple nodes. -const rst = new ReplSetTest({ - nodes: 1, - nodeOptions: {auth: '', setParameter: {multitenancySupport: true, featureFlagMongoStore: true}} -}); -rst.startSet({keyFile: 'jstests/libs/key1'}); -rst.initiate(); - -const primary = rst.getPrimary(); -const adminDb = primary.getDB('admin'); +const mongod = MongoRunner.runMongod( + {auth: '', setParameter: {multitenancySupport: true, featureFlagMongoStore: true}}); +const adminDb = mongod.getDB('admin'); // Prepare a user for testing pass tenant via $tenant. // Must be authenticated as a user with ActionType::useTenant in order to use $tenant. @@ -26,7 +19,7 @@ const kOtherTenant = ObjectId(); const kDbName = 'test'; const kCollName = 'myColl0'; -const tokenConn = new Mongo(primary.host); +const tokenConn = new Mongo(mongod.host); const securityToken = _createSecurityToken({user: "userTenant1", db: '$external', tenant: kTenant}); const tokenDB = tokenConn.getDB(kDbName); @@ -37,7 +30,7 @@ const tokenDB = tokenConn.getDB(kDbName); // Test commands using a security token for one tenant. 
{ // Create a user for kTenant and then set the security token on the connection. - assert.commandWorked(primary.getDB('$external').runCommand({ + assert.commandWorked(mongod.getDB('$external').runCommand({ createUser: "userTenant1", '$tenant': kTenant, roles: @@ -161,23 +154,6 @@ const tokenDB = tokenConn.getDB(kDbName); tokenDB.runCommand({insert: kCollName, documents: [{_id: 0, a: 1, b: 1}]})); } - // Test that transactions can be run successfully. - { - const session = tokenDB.getMongo().startSession(); - const sessionDb = session.getDatabase(kDbName); - session.startTransaction(); - assert.commandWorked(sessionDb.runCommand( - {delete: kCollName, deletes: [{q: {_id: 0, a: 1, b: 1}, limit: 1}]})); - session.commitTransaction_forTesting(); - - const findRes = assert.commandWorked(tokenDB.runCommand({find: kCollName})); - assert.eq(0, findRes.cursor.firstBatch.length, tojson(findRes.cursor.firstBatch)); - - // Reset the collection and document. - assert.commandWorked( - tokenDB.runCommand({insert: kCollName, documents: [{_id: 0, a: 1, b: 1}]})); - } - // Test createIndexes, listIndexes and dropIndexes command. { var sortIndexesByName = function(indexes) { @@ -224,7 +200,7 @@ const tokenDB = tokenConn.getDB(kDbName); { // Create a user for a different tenant, and set the security token on the connection. // We reuse the same connection, but swap the token out. - assert.commandWorked(primary.getDB('$external').runCommand({ + assert.commandWorked(mongod.getDB('$external').runCommand({ createUser: "userTenant2", '$tenant': kOtherTenant, roles: @@ -284,7 +260,7 @@ const tokenDB = tokenConn.getDB(kDbName); // commands on the doc when passing the correct tenant, but not when passing a different // tenant. 
{ - const privelegedConn = new Mongo(primary.host); + const privelegedConn = new Mongo(mongod.host); assert(privelegedConn.getDB('admin').auth('admin', 'pwd')); const privelegedDB = privelegedConn.getDB('test'); @@ -344,5 +320,5 @@ const tokenDB = tokenConn.getDB(kDbName); } } -rst.stopSet(); +MongoRunner.stopMongod(mongod); })(); diff --git a/jstests/serverless/serverless_reject_multiple_ops_access_blocker.js b/jstests/serverless/serverless_reject_multiple_ops_access_blocker.js deleted file mode 100644 index 0c7107f9682..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_access_blocker.js +++ /dev/null @@ -1,67 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function cannotStartMigrationWhenThereIsAnExistingAccessBlocker(protocol) { - // Test that we cannot start a tenant migration for a tenant that already has an access blocker. - const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - // Ensure a high enough delay so the shard split document is not deleted before tenant migration - // is started. 
- sharedOptions = {}; - sharedOptions["setParameter"] = { - shardSplitGarbageCollectionDelayMS: 36000000, - ttlMonitorSleepSecs: 1 - }; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - let recipientNodes = addRecipientNodes(test.getDonorRst(), recipientTagName); - - const commitThread = commitSplitAsync( - test.getDonorRst(), tenantIds, recipientTagName, recipientSetName, splitMigrationId); - assert.commandWorked(commitThread.returnData()); - - // Remove recipient nodes - test.getDonorRst().nodes = - test.getDonorRst().nodes.filter(node => !recipientNodes.includes(node)); - test.getDonorRst().ports = - test.getDonorRst().ports.filter(port => !recipientNodes.some(node => node.port === port)); - - assert.commandWorked(test.getDonorRst().getPrimary().adminCommand( - {forgetShardSplit: 1, migrationId: splitMigrationId})); - - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - assert.commandFailed(test.startMigration(migrationOpts)); - - recipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - test.stop(); - jsTestLog("cannotStartMigrationWhenThereIsAnExistingAccessBlocker test completed"); -} - -cannotStartMigrationWhenThereIsAnExistingAccessBlocker("multitenant migrations"); -cannotStartMigrationWhenThereIsAnExistingAccessBlocker("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_migration_after_garbage_collection.js b/jstests/serverless/serverless_reject_multiple_ops_migration_after_garbage_collection.js deleted file mode 100644 index f237d507262..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_after_garbage_collection.js +++ /dev/null @@ -1,69 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - 
-load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/parallelTester.js"); -load("jstests/libs/uuid_util.js"); - -function canStartMigrationAfterSplitGarbageCollection(protocol) { - // Test that we can start a migration after a shard split has been garbage collected. - const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - let recipientNodes = addRecipientNodes(test.getDonorRst(), recipientTagName); - - const commitThread = commitSplitAsync( - test.getDonorRst(), tenantIds, recipientTagName, recipientSetName, splitMigrationId); - assert.commandWorked(commitThread.returnData()); - - // Remove recipient nodes - test.getDonorRst().nodes = - test.getDonorRst().nodes.filter(node => !recipientNodes.includes(node)); - test.getDonorRst().ports = - test.getDonorRst().ports.filter(port => !recipientNodes.some(node => node.port === port)); - - assert.commandWorked(test.getDonorRst().getPrimary().adminCommand( - {forgetShardSplit: 1, migrationId: splitMigrationId})); - - waitForGarbageCollectionForSplit(test.getDonorRst().nodes, splitMigrationId, tenantIds); - - jsTestLog("Starting tenant migration"); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - assert.commandWorked(test.startMigration(migrationOpts)); - - 
TenantMigrationTest.assertCommitted(test.waitForMigrationToComplete(migrationOpts)); - assert.commandWorked(test.forgetMigration(migrationOpts.migrationIdString)); - - recipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - test.stop(); - jsTestLog("canStartMigrationAfterSplitGarbageCollection test completed"); -} - -canStartMigrationAfterSplitGarbageCollection("multitenant migrations"); -canStartMigrationAfterSplitGarbageCollection("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_migration_different_tenant.js b/jstests/serverless/serverless_reject_multiple_ops_migration_different_tenant.js deleted file mode 100644 index 55309f25710..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_different_tenant.js +++ /dev/null @@ -1,73 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function cannotStartMigrationWithDifferentTenantWhileShardSplitIsInProgress(protocol) { - // Test that we cannot start a tenant migration while a shard split is in progress. Use a - // tenantId uninvolved in the split. 
- const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - let recipientNodes = addRecipientNodes(test.getDonorRst(), recipientTagName); - - let fp = - configureFailPoint(test.getDonorRst().getPrimary(), "pauseShardSplitBeforeBlockingState"); - - const commitThread = commitSplitAsync( - test.getDonorRst(), tenantIds, recipientTagName, recipientSetName, splitMigrationId); - fp.wait(); - - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = "otherTenantToMove"; - } - jsTestLog("Starting tenant migration"); - assert.commandFailedWithCode(test.startMigration(migrationOpts), - ErrorCodes.ConflictingServerlessOperation); - - fp.off(); - - assert.commandWorked(commitThread.returnData()); - - test.getDonorRst().nodes = - test.getDonorRst().nodes.filter(node => !recipientNodes.includes(node)); - test.getDonorRst().ports = - test.getDonorRst().ports.filter(port => !recipientNodes.some(node => node.port === port)); - - assert.commandWorked(test.getDonorRst().getPrimary().adminCommand( - {forgetShardSplit: 1, migrationId: splitMigrationId})); - - recipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - waitForGarbageCollectionForSplit(test.getDonorRst().nodes, splitMigrationId, tenantIds); - - test.stop(); - jsTestLog("cannotStartMigrationWithDifferentTenantWhileShardSplitIsInProgress test completed"); -} - -cannotStartMigrationWithDifferentTenantWhileShardSplitIsInProgress("multitenant migrations"); -cannotStartMigrationWithDifferentTenantWhileShardSplitIsInProgress("shard merge"); diff --git 
a/jstests/serverless/serverless_reject_multiple_ops_migration_donor.js b/jstests/serverless/serverless_reject_multiple_ops_migration_donor.js deleted file mode 100644 index f237d507262..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_donor.js +++ /dev/null @@ -1,69 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/parallelTester.js"); -load("jstests/libs/uuid_util.js"); - -function canStartMigrationAfterSplitGarbageCollection(protocol) { - // Test that we can start a migration after a shard split has been garbage collected. - const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - let recipientNodes = addRecipientNodes(test.getDonorRst(), recipientTagName); - - const commitThread = commitSplitAsync( - test.getDonorRst(), tenantIds, recipientTagName, recipientSetName, splitMigrationId); - assert.commandWorked(commitThread.returnData()); - - // Remove recipient nodes - test.getDonorRst().nodes = - test.getDonorRst().nodes.filter(node => !recipientNodes.includes(node)); - test.getDonorRst().ports = - test.getDonorRst().ports.filter(port => !recipientNodes.some(node => node.port === port)); - - assert.commandWorked(test.getDonorRst().getPrimary().adminCommand( - {forgetShardSplit: 1, migrationId: splitMigrationId})); - - 
waitForGarbageCollectionForSplit(test.getDonorRst().nodes, splitMigrationId, tenantIds); - - jsTestLog("Starting tenant migration"); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - assert.commandWorked(test.startMigration(migrationOpts)); - - TenantMigrationTest.assertCommitted(test.waitForMigrationToComplete(migrationOpts)); - assert.commandWorked(test.forgetMigration(migrationOpts.migrationIdString)); - - recipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - test.stop(); - jsTestLog("canStartMigrationAfterSplitGarbageCollection test completed"); -} - -canStartMigrationAfterSplitGarbageCollection("multitenant migrations"); -canStartMigrationAfterSplitGarbageCollection("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_migration_donor_retry.js b/jstests/serverless/serverless_reject_multiple_ops_migration_donor_retry.js deleted file mode 100644 index 1ea49a99f06..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_donor_retry.js +++ /dev/null @@ -1,86 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function retryMigrationAfterSplitCompletes(protocol) { - // Test that we cannot start a migration while a shard split is in progress. 
- const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const firstTenantMigrationId = UUID(); - const secondTenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - const splitRst = test.getDonorRst(); - - let splitRecipientNodes = addRecipientNodes(splitRst, recipientTagName); - - let fp = configureFailPoint(splitRst.getPrimary(), "pauseShardSplitBeforeBlockingState"); - - const commitThread = - commitSplitAsync(splitRst, tenantIds, recipientTagName, recipientSetName, splitMigrationId); - fp.wait(); - - const firstMigrationOpts = { - migrationIdString: extractUUIDFromObject(firstTenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - firstMigrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandFailedWithCode(test.startMigration(firstMigrationOpts), - ErrorCodes.ConflictingServerlessOperation); - - fp.off(); - - assert.commandWorked(commitThread.returnData()); - - splitRst.nodes = splitRst.nodes.filter(node => !splitRecipientNodes.includes(node)); - splitRst.ports = - splitRst.ports.filter(port => !splitRecipientNodes.some(node => node.port === port)); - - assert.commandWorked( - splitRst.getPrimary().adminCommand({forgetShardSplit: 1, migrationId: splitMigrationId})); - - splitRecipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - const secondMigrationOpts = { - migrationIdString: extractUUIDFromObject(secondTenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - secondMigrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(secondMigrationOpts)); - TenantMigrationTest.assertCommitted( - 
waitForMergeToComplete(secondMigrationOpts, secondTenantMigrationId, test)); - assert.commandWorked(test.forgetMigration(secondMigrationOpts.migrationIdString)); - - waitForGarbageCollectionForSplit(splitRst.nodes, splitMigrationId, tenantIds); - - test.stop(); - jsTestLog("cannotStartMigrationWhileShardSplitIsInProgress test completed"); -} - -retryMigrationAfterSplitCompletes("multitenant migrations"); -retryMigrationAfterSplitCompletes("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_migration_fail.js b/jstests/serverless/serverless_reject_multiple_ops_migration_fail.js deleted file mode 100644 index 2bf713c1cb4..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_fail.js +++ /dev/null @@ -1,72 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function cannotStartMigrationWhileShardSplitIsInProgress(protocol) { - // Test that we cannot start a migration while a shard split is in progress. 
- const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - const splitRst = test.getDonorRst(); - - let splitRecipientNodes = addRecipientNodes(splitRst, recipientTagName); - - let fp = configureFailPoint(splitRst.getPrimary(), "pauseShardSplitBeforeBlockingState"); - - const commitThread = - commitSplitAsync(splitRst, tenantIds, recipientTagName, recipientSetName, splitMigrationId); - fp.wait(); - - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandFailedWithCode(test.startMigration(migrationOpts), - ErrorCodes.ConflictingServerlessOperation); - - fp.off(); - - assert.commandWorked(commitThread.returnData()); - - splitRst.nodes = splitRst.nodes.filter(node => !splitRecipientNodes.includes(node)); - splitRst.ports = - splitRst.ports.filter(port => !splitRecipientNodes.some(node => node.port === port)); - - assert.commandWorked( - splitRst.getPrimary().adminCommand({forgetShardSplit: 1, migrationId: splitMigrationId})); - - splitRecipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - waitForGarbageCollectionForSplit(splitRst.nodes, splitMigrationId, tenantIds); - - test.stop(); - jsTestLog("cannotStartMigrationWhileShardSplitIsInProgress test completed"); -} - -cannotStartMigrationWhileShardSplitIsInProgress("multitenant migrations"); -cannotStartMigrationWhileShardSplitIsInProgress("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_migration_fail_on_recipient.js 
b/jstests/serverless/serverless_reject_multiple_ops_migration_fail_on_recipient.js deleted file mode 100644 index 70447f44323..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_fail_on_recipient.js +++ /dev/null @@ -1,78 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function cannotStartMigrationWhileShardSplitIsInProgressOnRecipient(protocol) { - // Test that we cannot start a migration while a shard split is in progress. - const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - const splitRst = test.getRecipientRst(); - - let splitRecipientNodes = addRecipientNodes(splitRst, recipientTagName); - - let fp = configureFailPoint(splitRst.getPrimary(), "pauseShardSplitBeforeBlockingState"); - - const commitThread = - commitSplitAsync(splitRst, tenantIds, recipientTagName, recipientSetName, splitMigrationId); - fp.wait(); - - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(migrationOpts)); - - const result = assert.commandWorked(test.waitForMigrationToComplete(migrationOpts)); - assert.eq(result.state, 
"aborted"); - assert.eq(result.abortReason.code, ErrorCodes.ConflictingServerlessOperation); - - assert.commandWorked( - test.forgetMigration(migrationOpts.migrationIdString, false /* retryOnRetryableErrors */)); - - fp.off(); - - assert.commandWorked(commitThread.returnData()); - - splitRst.nodes = splitRst.nodes.filter(node => !splitRecipientNodes.includes(node)); - splitRst.ports = - splitRst.ports.filter(port => !splitRecipientNodes.some(node => node.port === port)); - - assert.commandWorked( - splitRst.getPrimary().adminCommand({forgetShardSplit: 1, migrationId: splitMigrationId})); - - splitRecipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - waitForGarbageCollectionForSplit(splitRst.nodes, splitMigrationId, tenantIds); - - test.stop(); - jsTestLog("cannotStartMigrationWhileShardSplitIsInProgressOnRecipient test completed"); -} - -cannotStartMigrationWhileShardSplitIsInProgressOnRecipient("multitenant migrations"); -cannotStartMigrationWhileShardSplitIsInProgressOnRecipient("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_migration_recipient_retry.js b/jstests/serverless/serverless_reject_multiple_ops_migration_recipient_retry.js deleted file mode 100644 index f3bd83a5243..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_migration_recipient_retry.js +++ /dev/null @@ -1,90 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function cannotStartMigrationWhileShardSplitIsInProgressOnRecipient(protocol) { - // Test that we cannot start a migration while a shard split is in progress. 
- const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - const secondTenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - - const splitRst = test.getRecipientRst(); - - let splitRecipientNodes = addRecipientNodes(splitRst, recipientTagName); - - let fp = configureFailPoint(splitRst.getPrimary(), "pauseShardSplitBeforeBlockingState"); - - const commitThread = - commitSplitAsync(splitRst, tenantIds, recipientTagName, recipientSetName, splitMigrationId); - fp.wait(); - - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(migrationOpts)); - - const result = assert.commandWorked(test.waitForMigrationToComplete(migrationOpts)); - assert.eq(result.state, "aborted"); - assert.eq(result.abortReason.code, ErrorCodes.ConflictingServerlessOperation); - assert.commandWorked(test.forgetMigration(migrationOpts.migrationIdString)); - - fp.off(); - - assert.commandWorked(commitThread.returnData()); - - splitRst.nodes = splitRst.nodes.filter(node => !splitRecipientNodes.includes(node)); - splitRst.ports = - splitRst.ports.filter(port => !splitRecipientNodes.some(node => node.port === port)); - - assert.commandWorked( - splitRst.getPrimary().adminCommand({forgetShardSplit: 1, migrationId: splitMigrationId})); - - splitRecipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - const secondMigrationOpts = { - migrationIdString: extractUUIDFromObject(secondTenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - 
secondMigrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(secondMigrationOpts)); - TenantMigrationTest.assertCommitted( - waitForMergeToComplete(secondMigrationOpts, secondTenantMigrationId, test)); - assert.commandWorked(test.forgetMigration(secondMigrationOpts.migrationIdString)); - - waitForGarbageCollectionForSplit(splitRst.nodes, splitMigrationId, tenantIds); - - test.stop(); - jsTestLog("cannotStartMigrationWhileShardSplitIsInProgressOnRecipient test completed"); -} - -cannotStartMigrationWhileShardSplitIsInProgressOnRecipient("multitenant migrations"); -cannotStartMigrationWhileShardSplitIsInProgressOnRecipient("shard merge"); diff --git a/jstests/serverless/serverless_reject_multiple_ops_split.js b/jstests/serverless/serverless_reject_multiple_ops_split.js deleted file mode 100644 index 71ea388a9a7..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_split.js +++ /dev/null @@ -1,82 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function cannotStartShardSplitWithMigrationInProgress( - {recipientTagName, protocol, shardSplitRst, test}) { - // Test that we cannot start a shard split while a migration is in progress. 
- const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - let fp = configureFailPoint(test.getDonorRst().getPrimary(), - "pauseTenantMigrationBeforeLeavingDataSyncState"); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(migrationOpts)); - - fp.wait(); - - const commitThread = commitSplitAsync( - shardSplitRst, tenantIds, recipientTagName, recipientSetName, splitMigrationId); - assert.commandFailed(commitThread.returnData()); - - fp.off(); - - TenantMigrationTest.assertCommitted( - waitForMergeToComplete(migrationOpts, tenantMigrationId, test)); - assert.commandWorked(test.forgetMigration(migrationOpts.migrationIdString)); - - jsTestLog("cannotStartShardSplitWithMigrationInProgress test completed"); -} - -sharedOptions = {}; -sharedOptions["setParameter"] = { - shardSplitGarbageCollectionDelayMS: 0, - tenantMigrationGarbageCollectionDelayMS: 0, - ttlMonitorSleepSecs: 1 -}; - -const recipientTagName = "recipientTag"; - -const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); -addRecipientNodes(test.getDonorRst(), recipientTagName); -addRecipientNodes(test.getRecipientRst(), recipientTagName); - -cannotStartShardSplitWithMigrationInProgress({ - recipientTagName, - protocol: "multitenant migrations", - shardSplitRst: test.getDonorRst(), - test -}); -cannotStartShardSplitWithMigrationInProgress( - {recipientTagName, protocol: "shard merge", shardSplitRst: test.getDonorRst(), test}); - -cannotStartShardSplitWithMigrationInProgress({ - recipientTagName, - protocol: "multitenant migrations", - shardSplitRst: test.getRecipientRst(), - test -}); -cannotStartShardSplitWithMigrationInProgress( - {recipientTagName, protocol: 
"shard merge", shardSplitRst: test.getRecipientRst(), test}); - -test.stop(); diff --git a/jstests/serverless/serverless_reject_multiple_ops_split_retry.js b/jstests/serverless/serverless_reject_multiple_ops_split_retry.js deleted file mode 100644 index b4716494af6..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_split_retry.js +++ /dev/null @@ -1,127 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function retrySplit({protocol, recipientTagName, recipientSetName, tenantIds, test, splitRst}) { - const tenantMigrationId = UUID(); - const firstSplitMigrationId = UUID(); - const secondSplitMigrationId = UUID(); - - let recipientNodes = addRecipientNodes(splitRst, recipientTagName); - - let fp = configureFailPoint(test.getDonorRst().getPrimary(), - "pauseTenantMigrationBeforeLeavingDataSyncState"); - - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(migrationOpts)); - - fp.wait(); - - const commitThread = commitSplitAsync( - splitRst, tenantIds, recipientTagName, recipientSetName, firstSplitMigrationId); - assert.commandFailed(commitThread.returnData()); - - fp.off(); - - TenantMigrationTest.assertCommitted( - waitForMergeToComplete(migrationOpts, tenantMigrationId, test)); - assert.commandWorked(test.forgetMigration(migrationOpts.migrationIdString)); - - // Potential race condition as we do not know how quickly the future continuation in - // PrimaryOnlyService will 
remove the instance from its map. - sleep(1000); - const secondCommitThread = commitSplitAsync( - splitRst, tenantIds, recipientTagName, recipientSetName, secondSplitMigrationId); - assert.commandWorked(secondCommitThread.returnData()); - - splitRst.nodes = splitRst.nodes.filter(node => !recipientNodes.includes(node)); - splitRst.ports = - splitRst.ports.filter(port => !recipientNodes.some(node => node.port === port)); - - assert.commandWorked(splitRst.getPrimary().getDB("admin").runCommand( - {forgetShardSplit: 1, migrationId: secondSplitMigrationId})); - - recipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); -} - -// Test that we cannot start a shard split while a migration is in progress. -const recipientTagName = "recipientTag"; -const recipientSetName = "recipient"; -const tenantIds = ["tenant1", "tenant2"]; - -sharedOptions = { - setParameter: { - shardSplitGarbageCollectionDelayMS: 0, - tenantMigrationGarbageCollectionDelayMS: 0, - ttlMonitorSleepSecs: 1 - } -}; - -const test = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - -// "multitenant migration" with shard split on donor -retrySplit({ - protocol: "multitenant migrations", - recipientTagName, - recipientSetName, - tenantIds, - test, - splitRst: test.getDonorRst() -}); - -// "multitenant migration" with shard split on recipient -retrySplit({ - protocol: "multitenant migrations", - recipientTagName, - recipientSetName, - tenantIds, - test, - splitRst: test.getRecipientRst() -}); - -// "shard merge" with shard split on donor -retrySplit({ - protocol: "shard merge", - recipientTagName, - recipientSetName, - tenantIds, - test, - splitRst: test.getDonorRst() -}); - -test.stop(); - -// We need a new test for the next shard merge as adding nodes will cause a crash. 
-const test2 = new TenantMigrationTest({quickGarbageCollection: true, sharedOptions}); - -// "shard merge" with shard split on recipient -retrySplit({ - protocol: "multitenant migrations", - recipientTagName, - recipientSetName, - tenantIds, - test: test2, - splitRst: test2.getDonorRst() -}); - -test2.stop(); diff --git a/jstests/serverless/serverless_reject_multiple_ops_split_success.js b/jstests/serverless/serverless_reject_multiple_ops_split_success.js deleted file mode 100644 index 4f77bae0293..00000000000 --- a/jstests/serverless/serverless_reject_multiple_ops_split_success.js +++ /dev/null @@ -1,61 +0,0 @@ -/** - * @tags: [ - * serverless, - * requires_fcv_52, - * featureFlagShardSplit, - * featureFlagShardMerge - * ] - */ - -load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/serverless/libs/serverless_reject_multiple_ops_utils.js"); -load("jstests/libs/uuid_util.js"); - -function canStartShardSplitWithAbortedMigration({protocol, runOnRecipient}) { - const recipientTagName = "recipientTag"; - const recipientSetName = "recipient"; - const tenantIds = ["tenant1", "tenant2"]; - const splitMigrationId = UUID(); - const tenantMigrationId = UUID(); - - sharedOptions = {}; - sharedOptions["setParameter"] = {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}; - - const test = new TenantMigrationTest({quickGarbageCollection: false, sharedOptions}); - - const shardSplitRst = runOnRecipient ? 
test.getRecipientRst() : test.getDonorRst(); - - let recipientNodes = addRecipientNodes(shardSplitRst, recipientTagName); - - let fp = configureFailPoint(test.getDonorRst().getPrimary(), - "abortTenantMigrationBeforeLeavingBlockingState"); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(tenantMigrationId), - protocol, - }; - if (protocol != "shard merge") { - migrationOpts["tenantId"] = tenantIds[0]; - } - jsTestLog("Starting tenant migration"); - assert.commandWorked(test.startMigration(migrationOpts)); - - TenantMigrationTest.assertAborted( - waitForMergeToComplete(migrationOpts, tenantMigrationId, test)); - assert.commandWorked(test.forgetMigration(migrationOpts.migrationIdString)); - - const commitThread = commitSplitAsync( - shardSplitRst, tenantIds, recipientTagName, recipientSetName, splitMigrationId); - assert.commandWorked(commitThread.returnData()); - - recipientNodes.forEach(node => { - MongoRunner.stopMongod(node); - }); - - test.stop(); - jsTestLog("canStartShardSplitWithAbortedMigration test completed"); -} - -canStartShardSplitWithAbortedMigration({protocol: "multitenant migrations", runOnRecipient: false}); -canStartShardSplitWithAbortedMigration({protocol: "shard merge", runOnRecipient: false}); diff --git a/jstests/serverless/shard_split_recipient_removes_access_blockers.js b/jstests/serverless/shard_split_recipient_removes_access_blockers.js index 5398c8aba05..546b44c4d08 100644 --- a/jstests/serverless/shard_split_recipient_removes_access_blockers.js +++ b/jstests/serverless/shard_split_recipient_removes_access_blockers.js @@ -1,5 +1,5 @@ /* - * Test that tenant access blockers are removed when applying the recipient config + * Test that tenant access blockers are removed from recipients when applying the recipient config. 
* * @tags: [requires_fcv_52, featureFlagShardSplit, serverless] */ diff --git a/jstests/serverless/shard_split_recipient_removes_serverless_lock.js b/jstests/serverless/shard_split_recipient_removes_serverless_lock.js deleted file mode 100644 index adb55c8753e..00000000000 --- a/jstests/serverless/shard_split_recipient_removes_serverless_lock.js +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Test the serverless operation lock is released from recipients when the state document is - * removed. - * - * @tags: [requires_fcv_62, featureFlagShardSplit, serverless] - */ - -load("jstests/libs/fail_point_util.js"); -load("jstests/serverless/libs/basic_serverless_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -const {ServerlessLockType, getServerlessOperationLock} = TenantMigrationUtil; - -(function() { -"use strict"; - -// Skip db hash check because secondary is left with a different config. -TestData.skipCheckDBHashes = true; - -const test = new BasicServerlessTest({ - recipientTagName: "recipientNode", - recipientSetName: "recipient", - quickGarbageCollection: true -}); -test.addRecipientNodes(); - -const donorPrimary = test.donor.getPrimary(); -const tenantIds = ["tenant1", "tenant2"]; -const operation = test.createSplitOperation(tenantIds); - -const donorAfterBlockingFailpoint = - configureFailPoint(donorPrimary.getDB("admin"), "pauseShardSplitAfterBlocking"); - -const commitOp = operation.commitAsync(); -donorAfterBlockingFailpoint.wait(); - -jsTestLog("Asserting recipient nodes have installed the serverless lock"); -assert.soon(() => test.recipientNodes.every(node => getServerlessOperationLock(node) === - ServerlessLockType.ShardSplitDonor)); -donorAfterBlockingFailpoint.off(); - -commitOp.join(); -assert.commandWorked(commitOp.returnData()); - -jsTestLog("Asserting the serverless exclusion lock has been released"); -assert.soon(() => test.recipientNodes.every(node => getServerlessOperationLock(node) == - ServerlessLockType.None)); - -test.stop(); -})(); 
diff --git a/jstests/serverless/shard_split_startup_recovery_initially_aborted.js b/jstests/serverless/shard_split_startup_recovery_initially_aborted.js index faf0245b41a..f6301fa7f9a 100644 --- a/jstests/serverless/shard_split_startup_recovery_initially_aborted.js +++ b/jstests/serverless/shard_split_startup_recovery_initially_aborted.js @@ -1,9 +1,7 @@ /** - * Starts a shard split through `abortShardSplit` and assert that no tenant access blockers are + * Starts a shard split througt `abortShardSplit` and assert that no tenant access blockers are * recovered since we do not recover access blockers for aborted split marked garbage collectable. - * Also verifies the serverless operation lock is not acquired when starting a split in aborted - * state. - * @tags: [requires_fcv_62, featureFlagShardSplit] + * @tags: [requires_fcv_52, featureFlagShardSplit] */ load("jstests/libs/fail_point_util.js"); // for "configureFailPoint" @@ -11,8 +9,6 @@ load('jstests/libs/parallel_shell_helpers.js'); // for "startPa load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // for "setParameter" load("jstests/serverless/libs/basic_serverless_test.js"); load("jstests/replsets/libs/tenant_migration_test.js"); -load("jstests/replsets/libs/tenant_migration_util.js"); -const {ServerlessLockType, getServerlessOperationLock} = TenantMigrationUtil; (function() { "use strict"; @@ -63,8 +59,5 @@ tenantIds.every(tenantId => { {node: donorPrimary, tenantId: tenantId})); }); -// We do not acquire the lock for document marked for garbage collection -assert.eq(getServerlessOperationLock(donorPrimary), ServerlessLockType.None); - test.stop(); })(); diff --git a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js index abe9cb50808..846f2ff70c1 100644 --- a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js +++ b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js @@ 
-11,7 +11,7 @@ load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); // Define base test cases. For each test case: // - 'shardKey' is the shard key being analyzed. // - 'indexKey' is the index that the collection has. -// - 'indexOptions' is the additional options for the index. +// - 'indexCollation' is the collation for the index. const shardKeyPrefixedIndexTestCases = [ // Test non-compound shard keys with a shard key index. {shardKey: {a: 1}, indexKey: {a: 1}, expectMetrics: true}, @@ -31,12 +31,7 @@ const shardKeyPrefixedIndexTestCases = [ {shardKey: {_id: 1}, indexKey: {_id: 1}, expectMetrics: true}, {shardKey: {_id: 1, a: 1}, indexKey: {_id: 1, a: 1}, expectMetrics: true}, // Test shard key indexes with simple collation. - { - shardKey: {a: 1}, - indexKey: {a: 1}, - indexOptions: {collation: {locale: "simple"}}, - expectMetrics: true - }, + {shardKey: {a: 1}, indexKey: {a: 1}, indexCollation: {locale: "simple"}, expectMetrics: true}, ]; const compatibleIndexTestCases = [ @@ -52,7 +47,7 @@ const compatibleIndexTestCases = [ { shardKey: {a: 1}, indexKey: {a: "hashed"}, - indexOptions: {collation: {locale: "simple"}}, + indexCollation: {locale: "simple"}, expectMetrics: true }, ]; @@ -66,17 +61,7 @@ const noIndexTestCases = [ { shardKey: {a: 1}, indexKey: {a: 1}, - indexOptions: {collation: {locale: "fr"}}, // non-simple collation. - }, - { - shardKey: {a: 1}, - indexKey: {a: 1}, - indexOptions: {sparse: true}, - }, - { - shardKey: {a: 1}, - indexKey: {a: 1}, - indexOptions: {partialFilterExpression: {a: {$gte: 1}}}, + indexCollation: {locale: "fr"}, // non-simple collation. 
}, ]; @@ -84,48 +69,43 @@ const noIndexTestCases = [ const candidateKeyTestCases = []; const currentKeyTestCases = []; -for (let testCaseBase of shardKeyPrefixedIndexTestCases) { - if (!AnalyzeShardKeyUtil.isIdKeyPattern(testCaseBase.indexKey)) { - const testCase = Object.extend({indexOptions: {}}, testCaseBase, true /* deep */); - testCase.indexOptions.unique = false; - testCase.expectUnique = false; - candidateKeyTestCases.push(testCase); - currentKeyTestCases.push(testCase); +for (let testCase of shardKeyPrefixedIndexTestCases) { + if (!AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { + const nonUniqueIndexTestCase = + Object.assign({isUniqueIndex: false, expectUnique: false}, testCase); + candidateKeyTestCases.push(nonUniqueIndexTestCase); + currentKeyTestCases.push(nonUniqueIndexTestCase); } - if (!AnalyzeShardKeyUtil.isHashedKeyPattern(testCaseBase.indexKey)) { + if (!AnalyzeShardKeyUtil.isHashedKeyPattern(testCase.indexKey)) { // Hashed indexes cannot have a uniqueness constraint. 
- const testCase = Object.extend({indexOptions: {}}, testCaseBase, true /* deep */); - testCase.indexOptions.unique = true; - testCase.expectUnique = - Object.keys(testCaseBase.shardKey).length == Object.keys(testCaseBase.indexKey).length; - candidateKeyTestCases.push(testCase); - currentKeyTestCases.push(testCase); + const expectUnique = + Object.keys(testCase.shardKey).length == Object.keys(testCase.indexKey).length; + const uniqueIndexTestCase = Object.assign({isUniqueIndex: true, expectUnique}, testCase); + candidateKeyTestCases.push(uniqueIndexTestCase); + currentKeyTestCases.push(uniqueIndexTestCase); } } -for (let testCaseBase of compatibleIndexTestCases) { - if (!AnalyzeShardKeyUtil.isIdKeyPattern(testCaseBase.indexKey)) { - const testCase = Object.extend({indexOptions: {}}, testCaseBase, true /* deep */); - testCase.indexOptions.unique = false; - testCase.expectUnique = false; - candidateKeyTestCases.push(testCase); +for (let testCase of compatibleIndexTestCases) { + if (!AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { + const nonUniqueIndexTestCase = + Object.assign({isUniqueIndex: false, expectUnique: false}, testCase); + candidateKeyTestCases.push(nonUniqueIndexTestCase); } - if (!AnalyzeShardKeyUtil.isHashedKeyPattern(testCaseBase.indexKey)) { + if (!AnalyzeShardKeyUtil.isHashedKeyPattern(testCase.indexKey)) { // Hashed indexes cannot have a uniqueness constraint. 
- const testCase = Object.extend({indexOptions: {}}, testCaseBase, true /* deep */); - testCase.indexOptions.unique = true; - testCase.expectUnique = - Object.keys(testCaseBase.shardKey).length == Object.keys(testCaseBase.indexKey).length; - candidateKeyTestCases.push(testCase); + const expectUnique = + Object.keys(testCase.shardKey).length == Object.keys(testCase.indexKey).length; + const uniqueIndexTestCase = Object.assign({isUniqueIndex: true, expectUnique}, testCase); + candidateKeyTestCases.push(uniqueIndexTestCase); } } -for (let testCaseBase of noIndexTestCases) { +for (let testCase of noIndexTestCases) { // No metrics are expected for these test cases so there is no need to test with both non-unique // and unique index. - const testCase = Object.extend({indexOptions: {}}, testCaseBase, true /* deep */); - testCase.indexOptions.unique = false; - candidateKeyTestCases.push(testCase); + const nonUniqueIndexTestCase = Object.assign({isUniqueIndex: false}, testCase); + candidateKeyTestCases.push(nonUniqueIndexTestCase); } function assertNoMetrics(res) { @@ -204,7 +184,7 @@ function makeDocument(fieldNames, val) { * supporting/compatible index or doesn't a supporting/compatible index. */ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKey, testCase) { - assert(!testCase.indexOptions.unique); + assert(!testCase.isUniqueIndex); const ns = dbName + "." + collName; const db = conn.getDB(dbName); @@ -335,7 +315,7 @@ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKe * supporting/compatible index. */ function testAnalyzeShardKeyUniqueIndex(conn, dbName, collName, currentShardKey, testCase) { - assert(testCase.indexOptions.unique); + assert(testCase.isUniqueIndex); assert(testCase.expectMetrics); const ns = dbName + "." 
+ collName; @@ -438,11 +418,15 @@ function testAnalyzeCandidateShardKeysUnshardedCollection(conn, mongodConns) { jsTest.log(`Testing metrics for ${tojson({dbName, collName, testCase})}`); if (testCase.indexKey && !AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { - assert.commandWorked(coll.createIndex(testCase.indexKey, testCase.indexOptions)); + const indexOptions = {unique: testCase.isUniqueIndex}; + if (testCase.indexCollation) { + indexOptions.collation = testCase.indexCollation; + } + assert.commandWorked(coll.createIndex(testCase.indexKey, indexOptions)); } AnalyzeShardKeyUtil.enableProfiler(mongodConns, dbName); - if (testCase.indexOptions.unique) { + if (testCase.isUniqueIndex) { testAnalyzeShardKeyUniqueIndex( conn, dbName, collName, null /* currentShardKey */, testCase); } else { @@ -485,7 +469,7 @@ function testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns) { } const testCase = Object.assign({}, testCaseBase); - if (currentShardKey && testCase.indexOptions.unique) { + if (currentShardKey && testCase.isUniqueIndex) { // It is illegal to create a unique index that doesn't have the shard key as a prefix. 
assert(testCase.indexKey); testCase.shardKey = Object.assign({}, currentShardKey, testCase.shardKey); @@ -497,11 +481,15 @@ function testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns) { jsTest.log(`Testing metrics for ${tojson({dbName, collName, currentShardKey, testCase})}`); if (testCase.indexKey && !AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { - assert.commandWorked(coll.createIndex(testCase.indexKey, testCase.indexOptions)); + const indexOptions = {unique: testCase.isUniqueIndex}; + if (testCase.indexCollation) { + indexOptions.collation = testCase.indexCollation; + } + assert.commandWorked(coll.createIndex(testCase.indexKey, indexOptions)); } AnalyzeShardKeyUtil.enableProfiler(mongodConns, dbName); - if (testCase.indexOptions.unique) { + if (testCase.isUniqueIndex) { testAnalyzeShardKeyUniqueIndex(st.s, dbName, collName, currentShardKey, testCase); } else { testAnalyzeShardKeyNoUniqueIndex(st.s, dbName, collName, currentShardKey, testCase); @@ -541,7 +529,11 @@ function testAnalyzeCurrentShardKeys(st, mongodConns) { jsTest.log(`Testing metrics for ${tojson({dbName, collName, currentShardKey, testCase})}`); if (!AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { - assert.commandWorked(coll.createIndex(testCase.indexKey, testCase.indexOptions)); + const indexOptions = {unique: testCase.isUniqueIndex}; + if (testCase.indexCollation) { + indexOptions.collation = testCase.indexCollation; + } + assert.commandWorked(coll.createIndex(testCase.indexKey, indexOptions)); } assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: currentShardKey})); @@ -557,7 +549,7 @@ function testAnalyzeCurrentShardKeys(st, mongodConns) { AnalyzeShardKeyUtil.enableProfiler(mongodConns, dbName); - if (testCase.indexOptions.unique) { + if (testCase.isUniqueIndex) { testAnalyzeShardKeyUniqueIndex(st.s, dbName, collName, currentShardKey, testCase); } else { testAnalyzeShardKeyNoUniqueIndex(st.s, dbName, collName, currentShardKey, testCase); diff --git 
a/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js deleted file mode 100644 index 94251101ab4..00000000000 --- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Tests that the configureQueryAnalyzer command persists the configuration in a document - * in config.queryAnalyzers and that the document is deleted when the associated collection - * is dropped. - * - * @tags: [requires_fcv_62, featureFlagAnalyzeShardKey] - */ - -(function() { -"use strict"; - -const dbName = "testDb"; - -/** - * TestCase: { - * command: {ns : "coll namespace", - * mode : "full"|"off", - * sampleRate : 1.2}, - * } - * - */ - -/** - * Create documents represting all combinations of options for configureQueryAnalyzer command. - * @returns array of documents - */ -function optionsAllCombinations() { - const testCases = []; - const collName = "collection"; - for (const mode of ["off", "full"]) { - for (const sampleRate of [null, -1.0, 0.0, 0.2]) { - let testCase = - Object.assign({}, {command: {ns: dbName + "." + collName, mode, sampleRate}}); - if (sampleRate == null) { - delete testCase.command.sampleRate; - } - if ((mode == "off" && sampleRate !== null) || - (mode == "full" && - (sampleRate == null || typeof sampleRate !== "number" || sampleRate <= 0.0))) { - continue; // These cases are tested in configuer_query_analyzer_basic.js. 
- } - testCases.push(testCase); - } - } - return testCases; -} - -function assertConfigQueryAnalyzerResponse(res, mode, sampleRate) { - assert.eq(res.ok, 1); - assert.eq(res.mode, mode); - assert.eq(res.sampleRate, sampleRate); -} - -function assertQueryAnalyzerConfigDoc(configDb, db, collName, mode, sampleRate) { - const configColl = configDb.getCollection('queryAnalyzers'); - const listCollRes = - assert.commandWorked(db.runCommand({listCollections: 1, filter: {name: collName}})); - const uuid = listCollRes.cursor.firstBatch[0].info.uuid; - const doc = configColl.findOne({_id: uuid}); - assert.eq(doc.mode, mode, doc); - if (mode == "off") { - assert.eq(doc.hasOwnProperty("sampleRate"), false, doc); - } else if (mode == "full") { - assert.eq(doc.sampleRate, sampleRate, doc); - } -} - -function assertNoQueryAnalyzerConfigDoc(configDb, db, collName) { - const configColl = configDb.getCollection('queryAnalyzers'); - const listCollRes = - assert.commandWorked(db.runCommand({listCollections: 1, filter: {name: collName}})); - assert.eq(listCollRes.cursor.firstBatch, 0); -} - -function testConfigurationOptions(conn, testCases) { - const collName = "collection"; - const ns = dbName + "." 
+ collName; - const db = conn.getDB(dbName); - const coll = db.getCollection(ns); - let config = conn.getDB('config'); - assert.commandWorked(coll.remove({})); - assert.commandWorked(db.runCommand({insert: collName, documents: [{x: 1}]})); - - testCases.forEach(testCase => { - jsTest.log(`Running configureQueryAnalyzer command on test case ${tojson(testCase)}`); - - const res = conn.adminCommand({ - configureQueryAnalyzer: testCase.command.ns, - mode: testCase.command.mode, - sampleRate: testCase.command.sampleRate - }); - assert.commandWorked(res); - assertConfigQueryAnalyzerResponse(res, testCase.command.mode, testCase.command.sampleRate); - assertQueryAnalyzerConfigDoc( - config, db, collName, testCase.command.mode, testCase.command.sampleRate); - }); -} - -function testDropCollectionDeletesConfig(conn) { - const db = conn.getDB(dbName); - - const collNameSh = "collection2DropSh"; - const nsSh = dbName + "." + collNameSh; - const collSh = db.getCollection(collNameSh); - const collNameUnsh = "collection2DropUnsh"; - const nsUnsh = dbName + "." 
+ collNameUnsh; - const collUnsh = db.getCollection(collNameUnsh); - - const config = conn.getDB('config'); - const shardKey = {skey: 1}; - const shardKeySplitPoint = {skey: 2}; - - jsTest.log('Testing drop collection deletes query analyzer config doc'); - - assert.commandWorked(conn.adminCommand({shardCollection: nsSh, key: shardKey})); - assert.commandWorked(conn.adminCommand({split: nsSh, middle: shardKeySplitPoint})); - - assert.commandWorked(db.runCommand({insert: collNameSh, documents: [{skey: 1, y: 1}]})); - assert.commandWorked(db.runCommand({insert: collNameUnsh, documents: [{skey: 1, y: 1}]})); - - // sharded collection - - const mode = "full"; - const sampleRate = 0.5; - const resSh = - conn.adminCommand({configureQueryAnalyzer: nsSh, mode: mode, sampleRate: sampleRate}); - assert.commandWorked(resSh); - assertConfigQueryAnalyzerResponse(resSh, mode, sampleRate); - assertQueryAnalyzerConfigDoc(config, db, collNameSh, mode, sampleRate); - - collSh.drop(); - assertNoQueryAnalyzerConfigDoc(config, db, collNameSh); - - // unsharded collection - - const resUnsh = - conn.adminCommand({configureQueryAnalyzer: nsUnsh, mode: mode, sampleRate: sampleRate}); - assert.commandWorked(resUnsh); - assertConfigQueryAnalyzerResponse(resUnsh, mode, sampleRate); - assertQueryAnalyzerConfigDoc(config, db, collNameUnsh, mode, sampleRate); - - collUnsh.drop(); - assertNoQueryAnalyzerConfigDoc(config, db, collNameUnsh); -} - -function testDropDatabaseDeletesConfig(conn) { - let db = conn.getDB(dbName); - const collNameSh = "collection2DropSh"; - const nsSh = dbName + "." 
+ collNameSh; - const collSh = db.getCollection(nsSh); - - const config = conn.getDB('config'); - const shardKey = {skey: 1}; - const shardKeySplitPoint = {skey: 2}; - - jsTest.log('Testing drop database deletes query analyzer config doc'); - assert.commandWorked(conn.adminCommand({shardCollection: nsSh, key: shardKey})); - assert.commandWorked(conn.adminCommand({split: nsSh, middle: shardKeySplitPoint})); - assert.commandWorked(db.runCommand({insert: collNameSh, documents: [{skey: 1, y: 1}]})); - - // sharded collection - - const mode = "full"; - const sampleRate = 0.5; - const resSh = - conn.adminCommand({configureQueryAnalyzer: nsSh, mode: mode, sampleRate: sampleRate}); - assert.commandWorked(resSh); - assertConfigQueryAnalyzerResponse(resSh, mode, sampleRate); - assertQueryAnalyzerConfigDoc(config, db, collNameSh, mode, sampleRate); - db.dropDatabase(); - assertNoQueryAnalyzerConfigDoc(config, db, collNameSh); - - // unsharded collection - - db = conn.getDB(dbName); - const collNameUnsh = "collection2DropUnsh"; - const nsUnsh = dbName + "." 
+ collNameUnsh; - const collUnsh = db.getCollection(nsUnsh); - assert.commandWorked(db.runCommand({insert: collNameUnsh, documents: [{skey: 1, y: 1}]})); - - const resUnsh = - conn.adminCommand({configureQueryAnalyzer: nsUnsh, mode: mode, sampleRate: sampleRate}); - assert.commandWorked(resUnsh); - assertConfigQueryAnalyzerResponse(resUnsh, mode, sampleRate); - assertQueryAnalyzerConfigDoc(config, db, collNameUnsh, mode, sampleRate); - db.dropDatabase(); - assertNoQueryAnalyzerConfigDoc(config, db, collNameUnsh); -} - -{ - const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); - - st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.name}); - - const AllTestCases = optionsAllCombinations(); - testConfigurationOptions(st.s, AllTestCases); - - testDropCollectionDeletesConfig(st.s); - testDropDatabaseDeletesConfig(st.s); - - st.stop(); -} -})(); diff --git a/jstests/sharding/store_historical_placement_data.js b/jstests/sharding/store_historical_placement_data.js index 844d46cf54e..65a02f70f2a 100644 --- a/jstests/sharding/store_historical_placement_data.js +++ b/jstests/sharding/store_historical_placement_data.js @@ -34,7 +34,6 @@ const databaseDetails = databaseEntries[0]; assert(timestampCmp(databaseDetails.version.timestamp, placementDetails.timestamp) == 0); assert.eq(1, placementDetails.shards.length); assert.eq(databaseDetails.primary, placementDetails.shards[0]); -assert.eq(undefined, placementDetails.uuid); st.stop(); }()); diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml index 755ae596893..216b40d2a9e 100644 --- a/src/mongo/base/error_codes.yml +++ b/src/mongo/base/error_codes.yml @@ -494,14 +494,13 @@ error_codes: - {code: 381, name: ReshardingCoordinatorServiceConflictingOperationInProgress, extra: ReshardingCoordinatorServiceConflictingOperationInProgressInfo, categories: [InternalOnly]} + - {code: 382, name: RemoteCommandExecutionError, extra: RemoteCommandExecutionErrorInfo, categories: [InternalOnly]} - 
{code: 383, name: CollectionIsEmptyLocally, categories: [InternalOnly]} - {code: 384, name: ConnectionError, categories: [NetworkError,RetriableError,InternalOnly]} - - {code: 385, name: ConflictingServerlessOperation} - # Error codes 4000-8999 are reserved. # Non-sequential error codes for compatibility only) diff --git a/src/mongo/base/status_with.h b/src/mongo/base/status_with.h index 0781fd99480..fe85fa3f48d 100644 --- a/src/mongo/base/status_with.h +++ b/src/mongo/base/status_with.h @@ -151,6 +151,110 @@ public: return _status.isOK(); } + /** + * For any type U returned by a function f, transform creates a StatusWith by either applying + * the function to the _t member or forwarding the _status. This is the lvalue overload. + */ + template + StatusWith> transform(F&& f) & { + if (_t) + return {std::forward(f)(*_t)}; + else + return {_status}; + } + + /** + * For any type U returned by a function f, transform creates a StatusWith by either applying + * the function to the _t member or forwarding the _status. This is the const overload. + */ + template + StatusWith> transform(F&& f) const& { + if (_t) + return {std::forward(f)(*_t)}; + else + return {_status}; + } + + /** + * For any type U returned by a function f, transform creates a StatusWith by either applying + * the function to the _t member or forwarding the _status. This is the rvalue overload. + */ + template + StatusWith> transform(F&& f) && { + if (_t) + return {std::forward(f)(*std::move(_t))}; + else + return {std::move(_status)}; + } + + /** + * For any type U returned by a function f, transform creates a StatusWith by either applying + * the function to the _t member or forwarding the _status. This is the const rvalue overload. 
+ */ + template + StatusWith> transform(F&& f) const&& { + if (_t) + return {std::forward(f)(*std::move(_t))}; + else + return {std::move(_status)}; + } + + /** + * For any type U returned inside a StatusWith by a function f, andThen directly produces a + * StatusWith by applying the function to the _t member or creates one by forwarding the + * _status. andThen performs the same function as transform but for a function f with a return + * type of StatusWith. This is the lvalue overload. + */ + template + StatusWith::value_type> andThen(F&& f) & { + if (_t) + return {std::forward(f)(*_t)}; + else + return {_status}; + } + + /** + * For any type U returned inside a StatusWith by a function f, andThen directly produces a + * StatusWith by applying the function to the _t member or creates one by forwarding the + * _status. andThen performs the same function as transform but for a function f with a return + * type of StatusWith. This is the const overload. + */ + template + StatusWith::value_type> andThen(F&& f) const& { + if (_t) + return {std::forward(f)(*_t)}; + else + return {_status}; + } + + /** + * For any type U returned inside a StatusWith by a function f, andThen directly produces a + * StatusWith by applying the function to the _t member or creates one by forwarding the + * _status. andThen performs the same function as transform but for a function f with a return + * type of StatusWith. This is the rvalue overload. + */ + template + StatusWith::value_type> andThen(F&& f) && { + if (_t) + return {std::forward(f)(*std::move(_t))}; + else + return {std::move(_status)}; + } + + /** + * For any type U returned inside a StatusWith by a function f, andThen directly produces a + * StatusWith by applying the function to the _t member or creates one by forwarding the + * _status. andThen performs the same function as transform but for a function f with a return + * type of StatusWith. This is the const rvalue overload. 
+ */ + template + StatusWith::value_type> andThen(F&& f) const&& { + if (_t) + return {std::forward(f)(*std::move(_t))}; + else + return {std::move(_status)}; + } + /** * This method is a transitional tool, to facilitate transition to compile-time enforced status * checking. diff --git a/src/mongo/base/status_with_test.cpp b/src/mongo/base/status_with_test.cpp index fb3956ffaa7..d115815564e 100644 --- a/src/mongo/base/status_with_test.cpp +++ b/src/mongo/base/status_with_test.cpp @@ -92,5 +92,435 @@ TEST(StatusWith, ignoreTest) { [] { return StatusWith(false); }().getStatus().ignore(); } +TEST(StatusWith, MonadicTestLValue) { + { + auto from = StatusWith{3}; + auto to = from.transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = from.transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + auto from = StatusWith{3}; + auto to = from.andThen([](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = from.andThen([](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + auto from = StatusWith{3}; + auto to = from.andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + 
"StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::BadValue, "lousy value"), to.getStatus()); + } + { + auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = from.andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } +} + +TEST(StatusWith, MonadicTestConst) { + { + const auto from = StatusWith{3}; + auto to = from.transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + const auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = from.transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + const auto from = StatusWith{3}; + auto to = from.andThen([](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + const auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = from.andThen([](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + const auto from = StatusWith{3}; + auto to = from.andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + 
ASSERT_EQ(Status(ErrorCodes::BadValue, "lousy value"), to.getStatus()); + } + { + const auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = from.andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } +} + +TEST(StatusWith, MonadicTestRValue) { + { + auto from = StatusWith{3}; + auto to = std::move(from).transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = std::move(from).transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + auto from = StatusWith{3}; + auto to = std::move(from).andThen( + [](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = std::move(from).andThen( + [](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + auto from = StatusWith{3}; + auto to = std::move(from).andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + 
ASSERT_EQ(Status(ErrorCodes::BadValue, "lousy value"), to.getStatus()); + } + { + auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = std::move(from).andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } +} + +TEST(StatusWith, MonadicTestConstRValue) { + { + const auto from = StatusWith{3}; + auto to = std::move(from).transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + const auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = std::move(from).transform([](auto&& i) { return static_cast(i); }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::transform returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + const auto from = StatusWith{3}; + auto to = std::move(from).andThen( + [](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(3.0, to.getValue()); + } + { + const auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = std::move(from).andThen( + [](auto&& i) { return StatusWith{static_cast(i)}; }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } + { + const auto from = StatusWith{3}; + auto to = std::move(from).andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen 
returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::BadValue, "lousy value"), to.getStatus()); + } + { + const auto from = StatusWith{Status{ErrorCodes::IllegalOperation, "broke the law"}}; + auto to = std::move(from).andThen([](auto&& i) { + return StatusWith{Status{ErrorCodes::BadValue, "lousy value"}}; + }); + static_assert(std::is_same_v, decltype(to)>, + "StatusWith::andThen returns incorrect type"); + ASSERT_EQ(Status(ErrorCodes::IllegalOperation, "broke the law"), to.getStatus()); + } +} + +TEST(StatusWith, Overload) { + struct LValue {}; + struct Const {}; + struct RValue {}; + struct ConstRValue {}; + + struct { + auto operator()(int&) & { + return std::pair{LValue{}, LValue{}}; + } + auto operator()(const int&) & { + return std::pair{Const{}, LValue{}}; + } + auto operator()(int&&) & { + return std::pair{RValue{}, LValue{}}; + } + auto operator()(const int&&) & { + return std::pair{ConstRValue{}, LValue{}}; + } + + auto operator()(int&) const& { + return std::pair{LValue{}, Const{}}; + } + auto operator()(const int&) const& { + return std::pair{Const{}, Const{}}; + } + auto operator()(int&&) const& { + return std::pair{RValue{}, Const{}}; + } + auto operator()(const int&&) const& { + return std::pair{ConstRValue{}, Const{}}; + } + + auto operator()(int&) && { + return std::pair{LValue{}, RValue{}}; + } + auto operator()(const int&) && { + return std::pair{Const{}, RValue{}}; + } + auto operator()(int&&) && { + return std::pair{RValue{}, RValue{}}; + } + auto operator()(const int&&) && { + return std::pair{ConstRValue{}, RValue{}}; + } + + auto operator()(int&) const&& { + return std::pair{LValue{}, ConstRValue{}}; + } + auto operator()(const int&) const&& { + return std::pair{Const{}, ConstRValue{}}; + } + auto operator()(int&&) const&& { + return std::pair{RValue{}, ConstRValue{}}; + } + auto operator()(const int&&) const&& { + return std::pair{ConstRValue{}, ConstRValue{}}; + } + } transformFuncs; + struct { + auto operator()(int&) & { + return 
StatusWith{std::pair{LValue{}, LValue{}}}; + } + auto operator()(const int&) & { + return StatusWith{std::pair{Const{}, LValue{}}}; + } + auto operator()(int&&) & { + return StatusWith{std::pair{RValue{}, LValue{}}}; + } + auto operator()(const int&&) & { + return StatusWith{std::pair{ConstRValue{}, LValue{}}}; + } + + auto operator()(int&) const& { + return StatusWith{std::pair{LValue{}, Const{}}}; + } + auto operator()(const int&) const& { + return StatusWith{std::pair{Const{}, Const{}}}; + } + auto operator()(int&&) const& { + return StatusWith{std::pair{RValue{}, Const{}}}; + } + auto operator()(const int&&) const& { + return StatusWith{std::pair{ConstRValue{}, Const{}}}; + } + + auto operator()(int&) && { + return StatusWith{std::pair{LValue{}, RValue{}}}; + } + auto operator()(const int&) && { + return StatusWith{std::pair{Const{}, RValue{}}}; + } + auto operator()(int&&) && { + return StatusWith{std::pair{RValue{}, RValue{}}}; + } + auto operator()(const int&&) && { + return StatusWith{std::pair{ConstRValue{}, RValue{}}}; + } + + auto operator()(int&) const&& { + return StatusWith{std::pair{LValue{}, ConstRValue{}}}; + } + auto operator()(const int&) const&& { + return StatusWith{std::pair{Const{}, ConstRValue{}}}; + } + auto operator()(int&&) const&& { + return StatusWith{std::pair{RValue{}, ConstRValue{}}}; + } + auto operator()(const int&&) const&& { + return StatusWith{std::pair{ConstRValue{}, ConstRValue{}}}; + } + } andThenFuncs; + { + auto in = StatusWith{3}; + static_assert(std::is_same_v>, + decltype(in.transform(transformFuncs))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(andThenFuncs))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.transform(std::as_const(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(std::as_const(andThenFuncs)))>, + "StatusWith::andThen 
returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.transform(std::move(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(std::move(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert( + std::is_same_v>, + decltype(in.transform(std::move(std::as_const(transformFuncs))))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(std::move(std::as_const(andThenFuncs))))>, + "StatusWith::andThen returns incorrect type"); + } + { + const auto in = StatusWith{3}; + static_assert(std::is_same_v>, + decltype(in.transform(transformFuncs))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(andThenFuncs))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.transform(std::as_const(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(std::as_const(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.transform(std::move(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(std::move(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert( + std::is_same_v>, + decltype(in.transform(std::move(std::as_const(transformFuncs))))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(in.andThen(std::move(std::as_const(andThenFuncs))))>, + "StatusWith::andThen returns incorrect type"); + } + { + auto in = StatusWith{3}; + static_assert(std::is_same_v>, + decltype(std::move(in).transform(transformFuncs))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).andThen(andThenFuncs))>, + 
"StatusWith::andThen returns incorrect type"); + static_assert( + std::is_same_v>, + decltype(std::move(in).transform(std::as_const(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).andThen(std::as_const(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).transform(std::move(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).andThen(std::move(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).transform( + std::move(std::as_const(transformFuncs))))>, + "StatusWith::transform returns incorrect type"); + static_assert( + std::is_same_v>, + decltype(std::move(in).andThen(std::move(std::as_const(andThenFuncs))))>, + "StatusWith::andThen returns incorrect type"); + } + { + const auto in = StatusWith{3}; + static_assert(std::is_same_v>, + decltype(std::move(in).transform(transformFuncs))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).andThen(andThenFuncs))>, + "StatusWith::andThen returns incorrect type"); + static_assert( + std::is_same_v>, + decltype(std::move(in).transform(std::as_const(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).andThen(std::as_const(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).transform(std::move(transformFuncs)))>, + "StatusWith::transform returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).andThen(std::move(andThenFuncs)))>, + "StatusWith::andThen returns incorrect type"); + static_assert(std::is_same_v>, + decltype(std::move(in).transform( + std::move(std::as_const(transformFuncs))))>, + 
"StatusWith::transform returns incorrect type"); + static_assert( + std::is_same_v>, + decltype(std::move(in).andThen(std::move(std::as_const(andThenFuncs))))>, + "StatusWith::andThen returns incorrect type"); + } +} + } // namespace } // namespace mongo diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp index a19b8973db4..035bb5ece61 100644 --- a/src/mongo/client/dbclient_cursor.cpp +++ b/src/mongo/client/dbclient_cursor.cpp @@ -73,7 +73,7 @@ BSONObj addMetadata(DBClientBase* client, BSONObj command) { } Message assembleCommandRequest(DBClientBase* client, - const DatabaseName& dbName, + StringData database, BSONObj commandObj, const ReadPreferenceSetting& readPref) { // Add the $readPreference field to the request. @@ -84,7 +84,7 @@ Message assembleCommandRequest(DBClientBase* client, } commandObj = addMetadata(client, std::move(commandObj)); - auto opMsgRequest = OpMsgRequestBuilder::create(dbName, commandObj); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(database, commandObj); return opMsgRequest.serialize(); } } // namespace @@ -97,7 +97,7 @@ Message DBClientCursor::assembleInit() { // We haven't gotten a cursorId yet so we need to issue the initial find command. invariant(_findRequest); BSONObj findCmd = _findRequest->toBSON(BSONObj()); - return assembleCommandRequest(_client, _ns.dbName(), std::move(findCmd), _readPref); + return assembleCommandRequest(_client, _ns.db(), std::move(findCmd), _readPref); } Message DBClientCursor::assembleGetMore() { @@ -112,7 +112,7 @@ Message DBClientCursor::assembleGetMore() { getMoreRequest.setTerm(static_cast(*_term)); } getMoreRequest.setLastKnownCommittedOpTime(_lastKnownCommittedOpTime); - auto msg = assembleCommandRequest(_client, _ns.dbName(), getMoreRequest.toBSON({}), _readPref); + auto msg = assembleCommandRequest(_client, _ns.db(), getMoreRequest.toBSON({}), _readPref); // Set the exhaust flag if needed. 
if (_isExhaust) { @@ -215,18 +215,13 @@ void DBClientCursor::dataReceived(const Message& reply, bool& retry, string& hos const auto replyObj = commandDataReceived(reply); _cursorId = 0; // Don't try to kill cursor if we get back an error. - // TODO SERVER-70067: pass in the tenant id to parseFromBSON. auto cr = uassertStatusOK(CursorResponse::parseFromBSON(replyObj)); _cursorId = cr.getCursorId(); uassert(50935, "Received a getMore response with a cursor id of 0 and the moreToCome flag set.", !(_connectionHasPendingReplies && _cursorId == 0)); - // TODO SERVER-70067: Get nss from the parsed cursor directly as it already has the tenant - // information. - _ns = NamespaceString( - _ns.tenantId(), // always reuse the request's tenant in case no tenant in the response. - cr.getNSS().toString()); // find command can change the ns to use for getMores. + _ns = cr.getNSS(); // find command can change the ns to use for getMores. // Store the resume token, if we got one. _postBatchResumeToken = cr.getPostBatchResumeToken(); _batch.objs = cr.releaseBatch(); @@ -345,7 +340,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client, _originalHost(_client->getServerAddress()), _nsOrUuid(nsOrUuid), _isInitialized(true), - _ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbName().value())), + _ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbname())), _cursorId(cursorId), _isExhaust(isExhaust), _operationTime(operationTime), @@ -358,7 +353,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client, : _client(client), _originalHost(_client->getServerAddress()), _nsOrUuid(findRequest.getNamespaceOrUUID()), - _ns(_nsOrUuid.nss() ? *_nsOrUuid.nss() : NamespaceString(_nsOrUuid.dbName().value())), + _ns(_nsOrUuid.nss() ? 
*_nsOrUuid.nss() : NamespaceString(_nsOrUuid.dbname())), _batchSize(findRequest.getBatchSize().value_or(0)), _findRequest(std::move(findRequest)), _readPref(readPref), diff --git a/src/mongo/client/dbclient_cursor_test.cpp b/src/mongo/client/dbclient_cursor_test.cpp index a7c1f9c5666..fa0c699b2e6 100644 --- a/src/mongo/client/dbclient_cursor_test.cpp +++ b/src/mongo/client/dbclient_cursor_test.cpp @@ -187,42 +187,6 @@ TEST_F(DBClientCursorTest, DBClientCursorCallsMetaDataReaderOncePerBatch) { ASSERT_EQ(2, numMetaRead); } -TEST_F(DBClientCursorTest, DBClientCursorGetMoreWithTenant) { - // Set up the DBClientCursor and a mock client connection. - DBClientConnectionForTest conn; - const TenantId tenantId(OID::gen()); - const NamespaceString nss(tenantId, "test", "coll"); - FindCommandRequest findCmd{nss}; - DBClientCursor cursor(&conn, findCmd, ReadPreferenceSetting{}, false); - cursor.setBatchSize(2); - - // Set up mock 'find' response. - const long long cursorId = 42; - Message findResponseMsg = mockFindResponse(nss, cursorId, {docObj(1), docObj(2)}); - conn.setCallResponse(findResponseMsg); - - // Trigger a find command. - ASSERT(cursor.init()); - - // First batch from the initial find command. - ASSERT_BSONOBJ_EQ(docObj(1), cursor.next()); - ASSERT_BSONOBJ_EQ(docObj(2), cursor.next()); - ASSERT_FALSE(cursor.moreInCurrentBatch()); - - // Set a terminal getMore response with cursorId 0. - auto getMoreResponseMsg = mockGetMoreResponse(nss, 0, {docObj(3), docObj(4)}); - conn.setCallResponse(getMoreResponseMsg); - - // Trigger a subsequent getMore command. - ASSERT_TRUE(cursor.more()); - - // Second batch from the getMore command. - ASSERT_BSONOBJ_EQ(docObj(3), cursor.next()); - ASSERT_BSONOBJ_EQ(docObj(4), cursor.next()); - ASSERT_FALSE(cursor.moreInCurrentBatch()); - ASSERT_TRUE(cursor.isDead()); -} - TEST_F(DBClientCursorTest, DBClientCursorHandlesOpMsgExhaustCorrectly) { // Set up the DBClientCursor and a mock client connection. 
diff --git a/src/mongo/client/streamable_replica_set_monitor.cpp b/src/mongo/client/streamable_replica_set_monitor.cpp index fcbf2e99a03..960eea40256 100644 --- a/src/mongo/client/streamable_replica_set_monitor.cpp +++ b/src/mongo/client/streamable_replica_set_monitor.cpp @@ -104,6 +104,16 @@ std::string readPrefToStringFull(const ReadPreferenceSetting& readPref) { return builder.obj().toString(); } +std::string hostListToString(boost::optional> x) { + std::stringstream s; + if (x) { + for (const auto& h : *x) { + s << h.toString() << "; "; + } + } + return s.str(); +} + double pingTimeMillis(const ServerDescriptionPtr& serverDescription) { const auto& serverRtt = serverDescription->getRtt(); // Convert to micros so we don't lose information if under a ms diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 0cf0a1d57c9..fcf495760a1 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -2517,6 +2517,9 @@ if wiredtiger: 'mirroring_sampler_test.cpp', 'multi_key_path_tracker_test.cpp', 'namespace_string_test.cpp', + 'op_observer/op_observer_impl_test.cpp', + 'op_observer/op_observer_registry_test.cpp', + 'op_observer/user_write_block_mode_op_observer_test.cpp', 'operation_context_test.cpp', 'operation_cpu_timer_test.cpp', 'operation_id_test.cpp', @@ -2558,16 +2561,27 @@ if wiredtiger: '$BUILD_DIR/mongo/db/catalog/catalog_test_fixture', '$BUILD_DIR/mongo/db/catalog/collection_crud', '$BUILD_DIR/mongo/db/catalog/database_holder', + '$BUILD_DIR/mongo/db/catalog/import_collection_oplog_entry', '$BUILD_DIR/mongo/db/catalog/index_build_entry_idl', + '$BUILD_DIR/mongo/db/catalog/local_oplog_info', '$BUILD_DIR/mongo/db/change_collection_expired_change_remover', '$BUILD_DIR/mongo/db/change_stream_change_collection_manager', '$BUILD_DIR/mongo/db/change_stream_serverless_helpers', '$BUILD_DIR/mongo/db/change_streams_cluster_parameter', + '$BUILD_DIR/mongo/db/commands/create_command', '$BUILD_DIR/mongo/db/mongohasher', + 
'$BUILD_DIR/mongo/db/op_observer/fcv_op_observer', + '$BUILD_DIR/mongo/db/op_observer/op_observer', + '$BUILD_DIR/mongo/db/op_observer/op_observer_impl', + '$BUILD_DIR/mongo/db/op_observer/op_observer_util', + '$BUILD_DIR/mongo/db/op_observer/oplog_writer_impl', + '$BUILD_DIR/mongo/db/op_observer/user_write_block_mode_op_observer', '$BUILD_DIR/mongo/db/ops/write_ops', '$BUILD_DIR/mongo/db/pipeline/change_stream_expired_pre_image_remover', '$BUILD_DIR/mongo/db/query/common_query_enums_and_helpers', '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/repl/image_collection_entry', + '$BUILD_DIR/mongo/db/repl/oplog_interface_local', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/repl/repl_server_parameters', '$BUILD_DIR/mongo/db/repl/replmocks', @@ -2591,10 +2605,12 @@ if wiredtiger: '$BUILD_DIR/mongo/util/clock_source_mock', '$BUILD_DIR/mongo/util/net/network', '$BUILD_DIR/mongo/util/net/ssl_options_server', + 'batched_write_context', 'collection_index_usage_tracker', 'commands', 'common', 'curop', + 'dbdirectclient', 'dbmessage', 'fle_crud', 'fle_mocks', diff --git a/src/mongo/db/auth/auth_op_observer.h b/src/mongo/db/auth/auth_op_observer.h index 4b4186db6af..365b223f5e7 100644 --- a/src/mongo/db/auth/auth_op_observer.h +++ b/src/mongo/db/auth/auth_op_observer.h @@ -56,8 +56,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -217,6 +216,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp index 
c7404c0b58b..5370096843a 100644 --- a/src/mongo/db/auth/authz_manager_external_state_d.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp @@ -49,6 +49,18 @@ #include "mongo/util/str.h" namespace mongo { +namespace { +// TODO (SERVER-67423) Once UMCs can inject users/roles correctly, +// we'll be able to pull them back out correctly. +// For now, we have to mangle the namespace strings for consistency. +NamespaceString patchForMultitenant(const NamespaceString& nss) { + if (nss.tenantId()) { + return NamespaceString(boost::none, nss.dbName().toStringWithTenantId(), nss.coll()); + } else { + return nss; + } +} +} // namespace AuthzManagerExternalStateMongod::AuthzManagerExternalStateMongod() = default; AuthzManagerExternalStateMongod::~AuthzManagerExternalStateMongod() = default; @@ -65,7 +77,7 @@ Status AuthzManagerExternalStateMongod::query( const std::function& resultProcessor) { try { DBDirectClient client(opCtx); - FindCommandRequest findRequest{collectionName}; + FindCommandRequest findRequest{patchForMultitenant(collectionName)}; findRequest.setFilter(filter); findRequest.setProjection(projection); client.find(std::move(findRequest), resultProcessor); @@ -79,7 +91,7 @@ Status AuthzManagerExternalStateMongod::findOne(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& query, BSONObj* result) { - AutoGetCollectionForReadCommandMaybeLockFree ctx(opCtx, nss); + AutoGetCollectionForReadCommandMaybeLockFree ctx(opCtx, patchForMultitenant(nss)); BSONObj found; if (Helpers::findOne(opCtx, ctx.getCollection(), query, found)) { @@ -93,7 +105,7 @@ Status AuthzManagerExternalStateMongod::findOne(OperationContext* opCtx, bool AuthzManagerExternalStateMongod::hasOne(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& query) { - AutoGetCollectionForReadCommandMaybeLockFree ctx(opCtx, nss); + AutoGetCollectionForReadCommandMaybeLockFree ctx(opCtx, patchForMultitenant(nss)); return !Helpers::findOne(opCtx, ctx.getCollection(), 
query).isNull(); } diff --git a/src/mongo/db/auth/validated_tenancy_scope_test.cpp b/src/mongo/db/auth/validated_tenancy_scope_test.cpp index 94b35e9626b..037faba6538 100644 --- a/src/mongo/db/auth/validated_tenancy_scope_test.cpp +++ b/src/mongo/db/auth/validated_tenancy_scope_test.cpp @@ -90,7 +90,7 @@ protected: }; TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportOffWithoutTenantOK) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", false); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", false); auto body = BSON("$db" << "foo"); @@ -99,7 +99,7 @@ TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportOffWithoutTenantOK) } TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithTenantOK) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); auto kOid = OID::gen(); auto body = BSON("ping" << 1 << "$tenant" << kOid); @@ -111,7 +111,7 @@ TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithTenantOK) { } TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithSecurityTokenOK) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const TenantId kTenantId(OID::gen()); auto body = BSON("ping" << 1); @@ -126,7 +126,7 @@ TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithSecurityTokenOK) } TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportOffWithTenantNOK) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", false); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", false); auto kOid = OID::gen(); auto body = BSON("ping" << 1 << "$tenant" << kOid); @@ -139,7 +139,7 @@ TEST_F(ValidatedTenancyScopeTestFixture, 
MultitenancySupportOffWithTenantNOK) { } TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithTenantNOK) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); auto kOid = OID::gen(); auto body = BSON("ping" << 1 << "$tenant" << kOid); @@ -153,7 +153,7 @@ TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithTenantNOK) { // TODO SERVER-66822: Re-enable this test case. // TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithoutTenantAndSecurityTokenNOK) { -// RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); +// RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); // auto body = BSON("ping" << 1); // AuthorizationSessionImplTestHelper::grantUseTenant(*(client.get())); // ASSERT_THROWS_CODE(ValidatedTenancyScope::create(client.get(), body, {}), DBException, @@ -161,7 +161,7 @@ TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithTenantNOK) { // } TEST_F(ValidatedTenancyScopeTestFixture, MultitenancySupportWithTenantAndSecurityTokenNOK) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); auto kOid = OID::gen(); auto body = BSON("ping" << 1 << "$tenant" << kOid); diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index 010bb1eccf0..a15c2276aaf 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -1050,43 +1050,44 @@ StatusWith CollectionImpl::updateDocumentWithDamages( } RecordData oldRecordData(oldDoc.value().objdata(), oldDoc.value().objsize()); - StatusWith recordData = - _shared->_recordStore->updateWithDamages(opCtx, loc, oldRecordData, damageSource, damages); - if (!recordData.isOK()) - return 
recordData.getStatus(); - BSONObj newDoc = std::move(recordData.getValue()).releaseToBson().getOwned(); - - args->updatedDoc = newDoc; - args->changeStreamPreAndPostImagesEnabledForCollection = - isChangeStreamPreAndPostImagesEnabled(); - - if (indexesAffected) { - int64_t keysInserted = 0; - int64_t keysDeleted = 0; - - uassertStatusOK(_indexCatalog->updateRecord(opCtx, - {this, CollectionPtr::NoYieldTag{}}, - oldDoc.value(), - args->updatedDoc, - loc, - &keysInserted, - &keysDeleted)); - - if (opDebug) { - opDebug->additiveMetrics.incrementKeysInserted(keysInserted); - opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted); - // 'opDebug' may be deleted at rollback time in case of multi-document transaction. - if (!opCtx->inMultiDocumentTransaction()) { - opCtx->recoveryUnit()->onRollback([opDebug, keysInserted, keysDeleted]() { - opDebug->additiveMetrics.incrementKeysInserted(-keysInserted); - opDebug->additiveMetrics.incrementKeysDeleted(-keysDeleted); - }); + StatusWith newDocStatus = + _shared->_recordStore->updateWithDamages(opCtx, loc, oldRecordData, damageSource, damages) + .transform( + [](RecordData&& recordData) { return recordData.releaseToBson().getOwned(); }); + + if (newDocStatus.isOK()) { + args->updatedDoc = newDocStatus.getValue(); + args->changeStreamPreAndPostImagesEnabledForCollection = + isChangeStreamPreAndPostImagesEnabled(); + + if (indexesAffected) { + int64_t keysInserted = 0; + int64_t keysDeleted = 0; + + uassertStatusOK(_indexCatalog->updateRecord(opCtx, + {this, CollectionPtr::NoYieldTag{}}, + oldDoc.value(), + args->updatedDoc, + loc, + &keysInserted, + &keysDeleted)); + + if (opDebug) { + opDebug->additiveMetrics.incrementKeysInserted(keysInserted); + opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted); + // 'opDebug' may be deleted at rollback time in case of multi-document transaction. 
+ if (!opCtx->inMultiDocumentTransaction()) { + opCtx->recoveryUnit()->onRollback([opDebug, keysInserted, keysDeleted]() { + opDebug->additiveMetrics.incrementKeysInserted(-keysInserted); + opDebug->additiveMetrics.incrementKeysDeleted(-keysDeleted); + }); + } } } - } - opCtx->getServiceContext()->getOpObserver()->onUpdate(opCtx, onUpdateArgs); - return newDoc; + opCtx->getServiceContext()->getOpObserver()->onUpdate(opCtx, onUpdateArgs); + } + return newDocStatus; } bool CollectionImpl::isTemporary() const { diff --git a/src/mongo/db/catalog/collection_uuid_mismatch.cpp b/src/mongo/db/catalog/collection_uuid_mismatch.cpp index ef5062be962..57a6551dc9b 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch.cpp +++ b/src/mongo/db/catalog/collection_uuid_mismatch.cpp @@ -44,7 +44,7 @@ void checkCollectionUUIDMismatch(OperationContext* opCtx, auto actualNamespace = CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, *uuid); uassert( - (CollectionUUIDMismatchInfo{ns.dbName(), + (CollectionUUIDMismatchInfo{ns.db().toString(), *uuid, ns.coll().toString(), actualNamespace && actualNamespace->db() == ns.db() diff --git a/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp b/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp index 86a5cf5ab66..7cb40347e92 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp +++ b/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp @@ -45,14 +45,14 @@ constexpr StringData kActualCollectionFieldName = "actualCollection"_sd; std::shared_ptr CollectionUUIDMismatchInfo::parse(const BSONObj& obj) { auto actualNamespace = obj[kActualCollectionFieldName]; return std::make_shared( - DatabaseName(obj[kDbFieldName].str()), + obj[kDbFieldName].str(), UUID::parse(obj[kCollectionUUIDFieldName]).getValue(), obj[kExpectedCollectionFieldName].str(), actualNamespace.isNull() ? 
boost::none : boost::make_optional(actualNamespace.str())); } void CollectionUUIDMismatchInfo::serialize(BSONObjBuilder* builder) const { - builder->append(kDbFieldName, _dbName.db()); + builder->append(kDbFieldName, _db); _collectionUUID.appendToBuilder(builder, kCollectionUUIDFieldName); builder->append(kExpectedCollectionFieldName, _expectedCollection); if (_actualCollection) { diff --git a/src/mongo/db/catalog/collection_uuid_mismatch_info.h b/src/mongo/db/catalog/collection_uuid_mismatch_info.h index 01147676076..2a627f75e7e 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch_info.h +++ b/src/mongo/db/catalog/collection_uuid_mismatch_info.h @@ -30,7 +30,7 @@ #pragma once #include "mongo/base/error_extra_info.h" -#include "mongo/db/database_name.h" + #include "mongo/util/uuid.h" namespace mongo { @@ -38,11 +38,11 @@ class CollectionUUIDMismatchInfo final : public ErrorExtraInfo { public: static constexpr auto code = ErrorCodes::CollectionUUIDMismatch; - explicit CollectionUUIDMismatchInfo(DatabaseName dbName, + explicit CollectionUUIDMismatchInfo(std::string db, UUID collectionUUID, std::string expectedCollection, boost::optional actualCollection) - : _dbName(std::move(dbName)), + : _db(std::move(db)), _collectionUUID(std::move(collectionUUID)), _expectedCollection(std::move(expectedCollection)), _actualCollection(std::move(actualCollection)) {} @@ -51,8 +51,8 @@ public: void serialize(BSONObjBuilder* builder) const override; - const auto& dbName() const { - return _dbName; + const auto& db() const { + return _db; } const auto& collectionUUID() const { @@ -68,7 +68,7 @@ public: } private: - DatabaseName _dbName; + std::string _db; UUID _collectionUUID; std::string _expectedCollection; boost::optional _actualCollection; diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp index a587b935bdb..7b39f823443 100644 --- a/src/mongo/db/catalog/collection_validation.cpp +++ 
b/src/mongo/db/catalog/collection_validation.cpp @@ -626,7 +626,7 @@ Status validate(OperationContext* opCtx, logAttrs(validateState.nss()), logAttrs(validateState.uuid())); } catch (const DBException& e) { - if (!opCtx->checkForInterruptNoAssert().isOK() || e.code() == ErrorCodes::Interrupted) { + if (opCtx->isKillPending() || e.code() == ErrorCodes::Interrupted) { LOGV2_OPTIONS(5160301, {LogComponent::kIndex}, "Validation interrupted", diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp index da4bcfd6d09..645362a327d 100644 --- a/src/mongo/db/catalog/drop_collection.cpp +++ b/src/mongo/db/catalog/drop_collection.cpp @@ -362,7 +362,7 @@ Status _dropCollection(OperationContext* opCtx, auto db = autoDb.getDb(); if (!db) { return expectedUUID - ? Status{CollectionUUIDMismatchInfo(collectionName.dbName(), + ? Status{CollectionUUIDMismatchInfo(collectionName.db().toString(), *expectedUUID, collectionName.coll().toString(), boost::none), diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.cpp b/src/mongo/db/change_stream_pre_images_collection_manager.cpp index cb1be82323c..25a12d7636a 100644 --- a/src/mongo/db/change_stream_pre_images_collection_manager.cpp +++ b/src/mongo/db/change_stream_pre_images_collection_manager.cpp @@ -472,12 +472,11 @@ void deleteExpiredChangeStreamPreImages(Client* client, Date_t currentTimeForTim "jobDuration"_attr = (Date_t::now() - startTime).toString()); } } catch (const DBException& exception) { - Status interruptStatus = opCtx ? 
opCtx.get()->checkForInterruptNoAssert() : Status::OK(); - if (!interruptStatus.isOK()) { + if (opCtx && opCtx.get()->getKillStatus() != ErrorCodes::OK) { LOGV2_DEBUG(5869105, 3, - "Periodic expired pre-images removal job operation was interrupted", - "errorCode"_attr = interruptStatus); + "Periodic expired pre-images removal job operation was killed", + "errorCode"_attr = opCtx.get()->getKillStatus()); } else { LOGV2_ERROR(5869106, "Periodic expired pre-images removal job failed", diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp index 52a0a7121ed..e83e572ba3d 100644 --- a/src/mongo/db/commands.cpp +++ b/src/mongo/db/commands.cpp @@ -932,7 +932,8 @@ private: } void doCheckAuthorization(OperationContext* opCtx) const override { - uassertStatusOK(_command->checkAuthForOperation(opCtx, _dbName, _request.body)); + uassertStatusOK(_command->checkAuthForOperation( + opCtx, _request.getDatabase().toString(), _request.body)); } const BSONObj& cmdObj() const { @@ -995,9 +996,9 @@ Status BasicCommandWithReplyBuilderInterface::explain(OperationContext* opCtx, } Status BasicCommandWithReplyBuilderInterface::checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, + const std::string& dbname, const BSONObj& cmdObj) const { - return checkAuthForCommand(opCtx->getClient(), dbname.db(), cmdObj); + return checkAuthForCommand(opCtx->getClient(), dbname, cmdObj); } Status BasicCommandWithReplyBuilderInterface::checkAuthForCommand(Client* client, diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h index 83e1d7f2de0..64a83ee99ae 100644 --- a/src/mongo/db/commands.h +++ b/src/mongo/db/commands.h @@ -905,7 +905,7 @@ public: * command. Default implementation defers to checkAuthForCommand. 
*/ virtual Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, + const std::string& dbname, const BSONObj& cmdObj) const; /** diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp index d114e8396d9..d074ecc3edb 100644 --- a/src/mongo/db/commands/apply_ops_cmd.cpp +++ b/src/mongo/db/commands/apply_ops_cmd.cpp @@ -203,10 +203,10 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, + const std::string& dbname, const BSONObj& cmdObj) const override { OplogApplicationValidity validity = validateApplyOpsCommand(cmdObj); - return OplogApplicationChecks::checkAuthForCommand(opCtx, dbname.db(), cmdObj, validity); + return OplogApplicationChecks::checkAuthForCommand(opCtx, dbname, cmdObj, validity); } bool run(OperationContext* opCtx, diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp index 0b875b9fa67..522df5047e6 100644 --- a/src/mongo/db/commands/count_cmd.cpp +++ b/src/mongo/db/commands/count_cmd.cpp @@ -129,7 +129,7 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, + const std::string& dbname, const BSONObj& cmdObj) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); @@ -138,10 +138,11 @@ public: } const auto hasTerm = false; - return auth::checkAuthForFind(authSession, - CollectionCatalog::get(opCtx)->resolveNamespaceStringOrUUID( - opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj)), - hasTerm); + return auth::checkAuthForFind( + authSession, + CollectionCatalog::get(opCtx)->resolveNamespaceStringOrUUID( + opCtx, CommandHelpers::parseNsOrUUID({boost::none, dbname}, cmdObj)), + hasTerm); } Status explain(OperationContext* opCtx, diff --git a/src/mongo/db/commands/create_indexes_cmd.cpp b/src/mongo/db/commands/create_indexes_cmd.cpp index 9d867dd340d..f6da49a5444 100644 --- a/src/mongo/db/commands/create_indexes_cmd.cpp +++ 
b/src/mongo/db/commands/create_indexes_cmd.cpp @@ -606,7 +606,7 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, throw; } catch (const DBException& ex) { - if (opCtx->checkForInterruptNoAssert().isOK()) { + if (!opCtx->isKillPending()) { throw; } diff --git a/src/mongo/db/commands/cst_command.cpp b/src/mongo/db/commands/cst_command.cpp index 3e44cf24294..9f68f8ca2be 100644 --- a/src/mongo/db/commands/cst_command.cpp +++ b/src/mongo/db/commands/cst_command.cpp @@ -54,14 +54,14 @@ public: // Test commands should never be enabled in production, but we try to require auth on new // test commands anyway, just in case someone enables them by mistake. Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); // This auth check is more restrictive than necessary, to make it simpler. // The CST command constructs a Pipeline, which might hold execution resources. // We could do fine-grained permission checking similar to the find or aggregate commands, // but that seems more complicated than necessary since this is only a test command. 
- if (!authSession->isAuthorizedForAnyActionOnAnyResourceInDB(dbname.db())) { + if (!authSession->isAuthorizedForAnyActionOnAnyResourceInDB(dbname)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index c6f8a329fd1..29c85819230 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -432,9 +432,9 @@ public: using Request = CollStatsCommand; Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, + const std::string& dbname, const BSONObj& cmdObj) const final { - const auto nss = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj); + const auto nss = CommandHelpers::parseNsCollectionRequired({boost::none, dbname}, cmdObj); auto as = AuthorizationSession::get(opCtx->getClient()); if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(nss), ActionType::collStats)) { diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp index e37ff47d2de..1895281af59 100644 --- a/src/mongo/db/commands/distinct.cpp +++ b/src/mongo/db/commands/distinct.cpp @@ -119,7 +119,7 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, + const std::string& dbname, const BSONObj& cmdObj) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); @@ -128,10 +128,11 @@ public: } const auto hasTerm = false; - return auth::checkAuthForFind(authSession, - CollectionCatalog::get(opCtx)->resolveNamespaceStringOrUUID( - opCtx, CommandHelpers::parseNsOrUUID(dbname, cmdObj)), - hasTerm); + return auth::checkAuthForFind( + authSession, + CollectionCatalog::get(opCtx)->resolveNamespaceStringOrUUID( + opCtx, CommandHelpers::parseNsOrUUID({boost::none, dbname}, cmdObj)), + hasTerm); } bool allowedInTransactions() const final { diff --git a/src/mongo/db/commands/generic_servers.cpp 
b/src/mongo/db/commands/generic_servers.cpp index af1ffc8d110..b01d445a122 100644 --- a/src/mongo/db/commands/generic_servers.cpp +++ b/src/mongo/db/commands/generic_servers.cpp @@ -259,7 +259,7 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const std::string&, const BSONObj&) const final { auto* as = AuthorizationSession::get(opCtx->getClient()); if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), diff --git a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp index 9c29fb6a372..e190bf507a4 100644 --- a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp +++ b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp @@ -69,8 +69,8 @@ public: return "kill logical sessions by pattern"; } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); if (!authSession->isAuthorizedForPrivilege( Privilege{ResourcePattern::forClusterResource(), ActionType::killAnySession})) { diff --git a/src/mongo/db/commands/kill_all_sessions_command.cpp b/src/mongo/db/commands/kill_all_sessions_command.cpp index e7854c13d89..2c51bb3b0ae 100644 --- a/src/mongo/db/commands/kill_all_sessions_command.cpp +++ b/src/mongo/db/commands/kill_all_sessions_command.cpp @@ -69,8 +69,8 @@ public: return "kill all logical sessions, for a user, and their operations"; } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); if (!authSession->isAuthorizedForPrivilege( Privilege{ResourcePattern::forClusterResource(), 
ActionType::killAnySession})) { diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp index 8cfd99a7d90..50cd1a6d5f5 100644 --- a/src/mongo/db/commands/kill_sessions_command.cpp +++ b/src/mongo/db/commands/kill_sessions_command.cpp @@ -94,9 +94,9 @@ public: } // Any user can kill their own sessions - Status checkAuthForOperation(OperationContext*, - const DatabaseName&, - const BSONObj&) const override { + Status checkAuthForOperation(OperationContext* opCtx, + const std::string& dbname, + const BSONObj& cmdObj) const override { return Status::OK(); } diff --git a/src/mongo/db/commands/reap_logical_session_cache_now.cpp b/src/mongo/db/commands/reap_logical_session_cache_now.cpp index 2d70454db91..ab4c6f10e72 100644 --- a/src/mongo/db/commands/reap_logical_session_cache_now.cpp +++ b/src/mongo/db/commands/reap_logical_session_cache_now.cpp @@ -59,9 +59,9 @@ public: } // No auth needed because it only works when enabled via command line. - Status checkAuthForOperation(OperationContext*, - const DatabaseName&, - const BSONObj&) const override { + Status checkAuthForOperation(OperationContext* opCtx, + const std::string& dbname, + const BSONObj& cmdObj) const override { return Status::OK(); } diff --git a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp index 4e7aabd5f24..5dafebcce89 100644 --- a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp +++ b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp @@ -63,9 +63,9 @@ public: } // No auth needed because it only works when enabled via command line. 
- Status checkAuthForOperation(OperationContext*, - const DatabaseName&, - const BSONObj&) const override { + Status checkAuthForOperation(OperationContext* opCtx, + const std::string& dbname, + const BSONObj& cmdObj) const override { return Status::OK(); } diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp index f598459ac0d..795a6b267a1 100644 --- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp +++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp @@ -494,6 +494,7 @@ private: _createGlobalIndexesIndexes(opCtx, requestedVersion); } else if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) { _createGlobalIndexesIndexes(opCtx, requestedVersion); + _addKeyPatternToRangeDeletionDocuments(opCtx, requestedVersion); } else { return; } @@ -510,6 +511,14 @@ private: } } + // TODO SERVER-69792: get rid of `_addKeyPatternToRangeDeletionDocuments` and its usages + void _addKeyPatternToRangeDeletionDocuments( + OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion) { + if (feature_flags::gRangeDeleterService.isEnabledOnVersion(requestedVersion)) { + addKeyPatternFieldIfMissing(opCtx); + } + } + // _runUpgrade performs all the upgrade specific code for setFCV. 
Any new feature specific // upgrade code should be placed in the _runUpgrade helper functions: // * _prepareForUpgrade: for any upgrade actions that should be done before taking the FCV full diff --git a/src/mongo/db/commands/start_session_command.cpp b/src/mongo/db/commands/start_session_command.cpp index de254ea1237..ca14935df92 100644 --- a/src/mongo/db/commands/start_session_command.cpp +++ b/src/mongo/db/commands/start_session_command.cpp @@ -71,15 +71,11 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { return Status::OK(); } - bool allowedWithSecurityToken() const final { - return true; - } - bool run(OperationContext* opCtx, const DatabaseName&, const BSONObj& cmdObj, diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp index e955d44d8c4..0e932868abd 100644 --- a/src/mongo/db/commands/user_management_commands.cpp +++ b/src/mongo/db/commands/user_management_commands.cpp @@ -222,17 +222,35 @@ Status checkOkayToGrantPrivilegesToRole(const RoleName& role, const PrivilegeVec return Status::OK(); } +// TODO (SERVER-67423) Convert DBClient to accept DatabaseName type. +// Currently tenant is lost on the way from DBDirectClient to runCommand. +// For now, just mangle the NamespaceName into (`tenant_db`, `coll`) format. +NamespaceString patchTenantNSS(const NamespaceString& nss) { + if (auto tenant = nss.tenantId()) { + return NamespaceString( + boost::none, str::stream() << *tenant << '_' << nss.dbName().db(), nss.coll()); + } else { + return nss; + } +} + +// TODO (SERVER-67423) Convert DBClient to accept DatabaseName type. 
NamespaceString usersNSS(const boost::optional& tenant) { if (tenant) { - return NamespaceString(tenant, NamespaceString::kAdminDb, NamespaceString::kSystemUsers); + return NamespaceString(boost::none, + str::stream() << *tenant << '_' << NamespaceString::kAdminDb, + NamespaceString::kSystemUsers); } else { return AuthorizationManager::usersCollectionNamespace; } } +// TODO (SERVER-67423) Convert DBClient to accept DatabaseName type. NamespaceString rolesNSS(const boost::optional& tenant) { if (tenant) { - return NamespaceString(tenant, NamespaceString::kAdminDb, NamespaceString::kSystemRoles); + return NamespaceString(boost::none, + str::stream() << *tenant << '_' << NamespaceString::kAdminDb, + NamespaceString::kSystemRoles); } else { return AuthorizationManager::rolesCollectionNamespace; } @@ -249,7 +267,8 @@ Status insertAuthzDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& document) try { DBDirectClient client(opCtx); - write_ops::checkWriteErrors(client.insert(write_ops::InsertCommandRequest(nss, {document}))); + write_ops::checkWriteErrors( + client.insert(write_ops::InsertCommandRequest(patchTenantNSS(nss), {document}))); return Status::OK(); } catch (const DBException& e) { return e.toStatus(); @@ -269,7 +288,7 @@ StatusWith updateAuthzDocuments(OperationContext* opCtx, bool multi) try { DBDirectClient client(opCtx); auto result = client.update([&] { - write_ops::UpdateCommandRequest updateOp(nss); + write_ops::UpdateCommandRequest updateOp(patchTenantNSS(nss)); updateOp.setUpdates({[&] { write_ops::UpdateOpEntry entry; entry.setQ(query); @@ -330,7 +349,7 @@ StatusWith removeAuthzDocuments(OperationContext* opCtx, const BSONObj& query) try { DBDirectClient client(opCtx); auto result = client.remove([&] { - write_ops::DeleteCommandRequest deleteOp(nss); + write_ops::DeleteCommandRequest deleteOp(patchTenantNSS(nss)); deleteOp.setDeletes({[&] { write_ops::DeleteOpEntry entry; entry.setQ(query); @@ -735,7 +754,11 @@ public: 
as->grantInternalAuthorization(_client.get()); } - _dbName = DatabaseName(tenant, kAdminDB); + if (tenant) { + _dbName = str::stream() << *tenant << '_' << kAdminDB; + } else { + _dbName = kAdminDB.toString(); + } AlternativeClientRegion clientRegion(_client); _sessionInfo.setStartTransaction(true); @@ -796,7 +819,20 @@ public: private: static bool validNamespace(const NamespaceString& nss) { - return (nss.dbName().db() == kAdminDB); + if (nss.dbName().db() == kAdminDB) { + return true; + } + if (gMultitenancySupport && !nss.tenantId()) { + // TODO (SERVER-67423) Convert DBClient to accept DatabaseName type. + try { + auto parsed = + NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(nss.ns()); + return parsed.dbName().db() == kAdminDB; + } catch (const DBException&) { + } + } + + return false; } StatusWith doCrudOp(BSONObj op) try { @@ -855,7 +891,7 @@ private: auto svcCtx = _client->getServiceContext(); auto sep = svcCtx->getServiceEntryPoint(); - auto opMsgRequest = OpMsgRequestBuilder::create(_dbName, cmdBuilder->obj()); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(_dbName, cmdBuilder->obj()); auto requestMessage = opMsgRequest.serialize(); // Switch to our local client and create a short-lived opCtx for this transaction op. 
@@ -874,7 +910,7 @@ private: bool _isReplSet; ServiceContext::UniqueClient _client; - DatabaseName _dbName; + std::string _dbName; OperationSessionInfoFromClient _sessionInfo; TransactionState _state = TransactionState::kInit; }; @@ -2260,7 +2296,7 @@ Status queryAuthzDocument(OperationContext* opCtx, const BSONObj& projection, const std::function& resultProcessor) try { DBDirectClient client(opCtx); - FindCommandRequest findRequest{nss}; + FindCommandRequest findRequest{patchTenantNSS(nss)}; findRequest.setFilter(query); findRequest.setProjection(projection); client.find(std::move(findRequest), resultProcessor); diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp index 6908803bf1c..f46c09d636e 100644 --- a/src/mongo/db/concurrency/d_concurrency.cpp +++ b/src/mongo/db/concurrency/d_concurrency.cpp @@ -165,10 +165,10 @@ Lock::GlobalLock::GlobalLock(OperationContext* opCtx, unlockFCVLock.dismiss(); unlockPBWM.dismiss(); } catch (const DBException& ex) { - // If our opCtx is interrupted or we got a LockTimeout or MaxTimeMSExpired, either throw or + // If our opCtx was killed or we got a LockTimeout or MaxTimeMSExpired, either throw or // suppress the exception depending on the specified interrupt behavior. For any other // exception, always throw. - if ((opCtx->checkForInterruptNoAssert().isOK() && ex.code() != ErrorCodes::LockTimeout && + if ((!opCtx->isKillPending() && ex.code() != ErrorCodes::LockTimeout && ex.code() != ErrorCodes::MaxTimeMSExpired) || _interruptBehavior == InterruptBehavior::kThrow) { throw; diff --git a/src/mongo/db/database_name_test.cpp b/src/mongo/db/database_name_test.cpp index b9068667242..88436c5d3f1 100644 --- a/src/mongo/db/database_name_test.cpp +++ b/src/mongo/db/database_name_test.cpp @@ -54,7 +54,7 @@ TEST(DatabaseNameTest, MultitenancySupportDisabled) { TEST(DatabaseNameTest, MultitenancySupportEnabledTenantIDNotRequired) { // TODO SERVER-62114 remove this test case. 
- RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); DatabaseName dbnWithoutTenant(boost::none, "a"); ASSERT(!dbnWithoutTenant.tenantId()); @@ -74,13 +74,13 @@ TEST(DatabaseNameTest, MultitenancySupportEnabledTenantIDNotRequired) { // TODO SERVER-65457 Re-enable these tests DEATH_TEST(DatabaseNameTest, TenantIDRequiredNoTenantIdAssigned, "invariant") { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); DatabaseName dbnWithoutTenant(boost::none, "a"); } TEST(DatabaseNameTest, TenantIDRequiredBasic) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); // TODO SERVER-62114 Remove enabling this feature flag. 
RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp index 55b871e2144..534747350c7 100644 --- a/src/mongo/db/dbdirectclient.cpp +++ b/src/mongo/db/dbdirectclient.cpp @@ -155,41 +155,25 @@ std::unique_ptr DBDirectClient::find(FindCommandRequest findRequ write_ops::FindAndModifyCommandReply DBDirectClient::findAndModify( const write_ops::FindAndModifyCommandRequest& findAndModify) { - auto request = findAndModify.serialize({}); - if (const auto& tenant = findAndModify.getDbName().tenantId()) { - request.setDollarTenant(tenant.value()); - } - auto response = runCommand(std::move(request)); + auto response = runCommand(findAndModify.serialize({})); return FindAndModifyOp::parseResponse(response->getCommandReply()); } write_ops::InsertCommandReply DBDirectClient::insert( const write_ops::InsertCommandRequest& insert) { - auto request = insert.serialize({}); - if (const auto& tenant = insert.getDbName().tenantId()) { - request.setDollarTenant(tenant.value()); - } - auto response = runCommand(request); + auto response = runCommand(insert.serialize({})); return InsertOp::parseResponse(response->getCommandReply()); } write_ops::UpdateCommandReply DBDirectClient::update( const write_ops::UpdateCommandRequest& update) { - auto request = update.serialize({}); - if (const auto& tenant = update.getDbName().tenantId()) { - request.setDollarTenant(tenant.value()); - } - auto response = runCommand(request); + auto response = runCommand(update.serialize({})); return UpdateOp::parseResponse(response->getCommandReply()); } write_ops::DeleteCommandReply DBDirectClient::remove( const write_ops::DeleteCommandRequest& remove) { - auto request = remove.serialize({}); - if (const auto& tenant = remove.getDbName().tenantId()) { - request.setDollarTenant(tenant.value()); - } - auto response = runCommand(request); + auto response = runCommand(remove.serialize({})); return 
DeleteOp::parseResponse(response->getCommandReply()); } @@ -203,10 +187,10 @@ long long DBDirectClient::count(const NamespaceStringOrUUID nsOrUuid, DirectClientScope directClientScope(_opCtx); BSONObj cmdObj = _countCmd(nsOrUuid, query, options, limit, skip, boost::none); - auto& dbName = (nsOrUuid.uuid() ? nsOrUuid.dbName().value() : (*nsOrUuid.nss()).dbName()); + auto dbName = (nsOrUuid.uuid() ? nsOrUuid.dbname() : (*nsOrUuid.nss()).db().toString()); - auto request = OpMsgRequestBuilder::create(dbName, cmdObj); - auto result = CommandHelpers::runCommandDirectly(_opCtx, request); + auto result = CommandHelpers::runCommandDirectly( + _opCtx, OpMsgRequest::fromDBAndBody(dbName, std::move(cmdObj))); uassertStatusOK(getStatusFromCommandResult(result)); return static_cast(result["n"].numberLong()); diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index dd00b6366e9..89f71139f07 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -119,13 +119,14 @@ RecordId Helpers::findOne(OperationContext* opCtx, massertStatusOK(statusWithCQ.getStatus()); unique_ptr cq = std::move(statusWithCQ.getValue()); - cq->setForceGenerateRecordId(true); - auto exec = uassertStatusOK(getExecutor(opCtx, - &collection, - std::move(cq), - nullptr /* extractAndAttachPipelineStages */, - PlanYieldPolicy::YieldPolicy::NO_YIELD)); + auto exec = uassertStatusOK( + getExecutor(opCtx, + &collection, + std::move(cq), + nullptr /* extractAndAttachPipelineStages */, + PlanYieldPolicy::YieldPolicy::NO_YIELD, + QueryPlannerParams::DEFAULT | QueryPlannerParams::PRESERVE_RECORD_ID)); PlanExecutor::ExecState state; BSONObj obj; diff --git a/src/mongo/db/exec/sbe/expressions/expression.cpp b/src/mongo/db/exec/sbe/expressions/expression.cpp index 30d73ec7d7e..19fd4cf83c4 100644 --- a/src/mongo/db/exec/sbe/expressions/expression.cpp +++ b/src/mongo/db/exec/sbe/expressions/expression.cpp @@ -37,8 +37,6 @@ #include "mongo/db/exec/sbe/size_estimator.h" #include 
"mongo/db/exec/sbe/stages/spool.h" #include "mongo/db/exec/sbe/stages/stages.h" -#include "mongo/db/exec/sbe/values/arith_common.h" -#include "mongo/db/exec/sbe/vm/datetime.h" #include "mongo/util/str.h" namespace mongo { @@ -520,7 +518,8 @@ static stdx::unordered_map kBuiltinFunctions = { {"tsSecond", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::tsSecond, false}}, {"tsIncrement", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::tsIncrement, false}}, {"typeMatch", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::typeMatch, false}}, - {"dateTrunc", BuiltinFn{[](size_t n) { return n == 6; }, vm::Builtin::dateTrunc, false}}}; + {"dateTrunc", + BuiltinFn{[](size_t n) { return n == 5 || n == 6; }, vm::Builtin::dateTrunc, false}}}; /** * The code generation function. @@ -606,54 +605,6 @@ vm::CodeFragment EFunction::compileDirect(CompileCtx& ctx) const { } vm::CodeFragment code; - // Optimize well known set of functions with constant arguments and generate their - // specialized variants. - if (_name == "typeMatch" && _nodes[1]->as()) { - auto [tag, val] = _nodes[1]->as()->getConstant(); - if (tag == value::TypeTags::NumberInt64) { - auto mask = value::bitcastTo(val); - uassert(6996901, - "Second argument to typeMatch() must be a 32-bit integer constant", - mask >> 32 == 0 || mask >> 32 == -1); - code.append(_nodes[0]->compileDirect(ctx)); - code.appendTypeMatch(mask); - - return code; - } - } else if (_name == "dateTrunc" && _nodes[2]->as() && - _nodes[3]->as() && _nodes[4]->as() && - _nodes[5]->as()) { - // The validation for the arguments has been omitted here because the constants - // have already been validated in the stage builder. 
- auto [timezoneDBTag, timezoneDBVal] = - ctx.getRuntimeEnvAccessor(_nodes[0]->as()->getSlotId()) - ->getViewOfValue(); - auto timezoneDB = value::getTimeZoneDBView(timezoneDBVal); - - auto [unitTag, unitVal] = _nodes[2]->as()->getConstant(); - auto unitString = value::getStringView(unitTag, unitVal); - auto unit = parseTimeUnit(unitString); - - auto [binSizeTag, binSizeValue] = _nodes[3]->as()->getConstant(); - auto [binSizeLongOwn, binSizeLongTag, binSizeLongValue] = - genericNumConvert(binSizeTag, binSizeValue, value::TypeTags::NumberInt64); - auto binSize = value::bitcastTo(binSizeLongValue); - - auto [timezoneTag, timezoneVal] = _nodes[4]->as()->getConstant(); - auto timezone = vm::getTimezone(timezoneTag, timezoneVal, timezoneDB); - - DayOfWeek startOfWeek{kStartOfWeekDefault}; - if (unit == TimeUnit::week) { - auto [startOfWeekTag, startOfWeekVal] = _nodes[5]->as()->getConstant(); - auto startOfWeekString = value::getStringView(startOfWeekTag, startOfWeekVal); - startOfWeek = parseDayOfWeek(startOfWeekString); - } - - code.append(_nodes[1]->compileDirect(ctx)); - code.appendDateTrunc(unit, binSize, timezone, startOfWeek); - return code; - } - for (size_t idx = arity; idx-- > 0;) { code.append(_nodes[idx]->compileDirect(ctx)); } diff --git a/src/mongo/db/exec/sbe/expressions/expression.h b/src/mongo/db/exec/sbe/expressions/expression.h index 4ab8f46f2bb..60e370ee0a7 100644 --- a/src/mongo/db/exec/sbe/expressions/expression.h +++ b/src/mongo/db/exec/sbe/expressions/expression.h @@ -448,12 +448,6 @@ public: size_t estimateSize() const final { return sizeof(*this); } - boost::optional getFrameId() const { - return _frameId; - } - value::SlotId getSlotId() const { - return _var; - } private: value::SlotId _var; diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp index 6f10d3c5eec..7ee0a83d838 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp +++ 
b/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp @@ -116,8 +116,18 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { value::OwnedValueAccessor startOfWeekAccessor; auto startOfWeekSlot = bindAccessor(&startOfWeekAccessor); - // Construct an invocation of "dateTrunc" function. + // Construct an invocation of "dateTrunc" function without 'startOfWeek' parameter. auto dateTruncExpression = + sbe::makeE("dateTrunc", + sbe::makeEs(makeE(timezoneDBSlot), + makeE(dateSlot), + makeE(unitSlot), + makeE(binSizeSlot), + makeE(timezoneSlot))); + auto compiledDateTrunc = compileExpression(*dateTruncExpression); + + // Construct an invocation of "dateTrunc" function with 'startOfWeek' parameter. + dateTruncExpression = sbe::makeE("dateTrunc", sbe::makeEs(makeE(timezoneDBSlot), makeE(dateSlot), @@ -125,7 +135,7 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { makeE(binSizeSlot), makeE(timezoneSlot), makeE(startOfWeekSlot))); - auto compiledDateTrunc = compileExpression(*dateTruncExpression); + auto compiledDateTruncWithStartOfWeek = compileExpression(*dateTruncExpression); // Setup timezone database. auto timezoneDatabase = std::make_unique(); @@ -138,14 +148,12 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { std::pair date; std::pair unit; std::pair binSize; - std::pair startOfWeek; std::pair expectedValue; // Output. + boost::optional> startOfWeek; }; const std::pair kNothing{value::TypeTags::Nothing, 0}; const std::pair kDate{makeDateValue(2022, 9, 12, 12, 24, 36)}; - const std::pair kDateOID{ - makeDateValueOID(2022, 9, 12, 12, 24, 36)}; const std::pair kHourTruncatedDate{ makeDateValue(2022, 9, 12, 12, 0, 0)}; std::vector testCases{ @@ -155,16 +163,14 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kHourTruncatedDate, }, { // Accepts OID values. 
value::makeNewString("GMT"), - kDateOID, + kDate, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kHourTruncatedDate, }, { @@ -173,7 +179,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { convertTimestampToSbeValue(Timestamp{Hours{3}, 0}), value::makeNewString("hour"), makeLongValue(2), - value::makeNewString("sun"), makeDateValue(1970, 1, 1, 2, 0, 0), }, {// 'timezone' is Nothing. @@ -181,21 +186,18 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kNothing}, {// 'timezone' is not a valid type. makeLongValue(0), kDate, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kNothing}, {// 'timezone' is not a recognized value. value::makeNewString("Arctic/North_Pole"), kDate, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kNothing}, { // 'date' is Nothing. @@ -203,7 +205,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kNothing, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kNothing, }, { @@ -212,7 +213,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { makeLongValue(0), value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("sun"), kNothing, }, { @@ -221,7 +221,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, kNothing, makeLongValue(1), - value::makeNewString("sun"), kNothing, }, { @@ -230,7 +229,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, makeLongValue(0), makeLongValue(1), - value::makeNewString("sun"), kNothing, }, { @@ -239,7 +237,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("century"), makeLongValue(1), - value::makeNewString("sun"), kNothing, }, { @@ -248,7 +245,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), kNothing, - value::makeNewString("sun"), kNothing, }, { @@ -257,7 +253,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), 
kNothing, - value::makeNewString("sun"), kNothing, }, { @@ -266,7 +261,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), value::makeNewString("one"), - value::makeNewString("sun"), kNothing, }, { @@ -275,7 +269,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeDoubleValue(1.5), - value::makeNewString("sun"), kNothing, }, { @@ -284,7 +277,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeLongValue(0), - value::makeNewString("sun"), kNothing, }, { @@ -293,7 +285,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeLongValue(-1), - value::makeNewString("sun"), kNothing, }, { @@ -302,7 +293,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeIntValue(1), - value::makeNewString("sun"), kHourTruncatedDate, }, { @@ -311,7 +301,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeDoubleValue(1.0), - value::makeNewString("sun"), kHourTruncatedDate, }, { @@ -320,7 +309,6 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeDecimalValue("1"), - value::makeNewString("sun"), kHourTruncatedDate, }, {// 'startOfWeek' is present and invalid type. @@ -328,16 +316,16 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("hour"), makeLongValue(1), - makeLongValue(0), - kHourTruncatedDate}, + kNothing, + makeLongValue(0)}, { // 'startOfWeek' is present, valid type but invalid value, unit is not week. value::makeNewString("UTC"), kDate, value::makeNewString("hour"), makeLongValue(1), - value::makeNewString("holiday"), kHourTruncatedDate, + value::makeNewString("holiday"), }, { // 'startOfWeek' is Nothing, unit is week. 
@@ -354,8 +342,8 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("week"), makeLongValue(1), - makeLongValue(0), kNothing, + makeLongValue(0), }, { // 'startOfWeek' is invalid value, unit is week. @@ -363,8 +351,8 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { kDate, value::makeNewString("week"), makeLongValue(1), - value::makeNewString("holiday"), kNothing, + value::makeNewString("holiday"), }, { // 'startOfWeek' is valid value, unit is week. @@ -372,8 +360,16 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { makeDateValue(2022, 9, 12, 12, 24, 36), value::makeNewString("week"), makeLongValue(1), - value::makeNewString("Saturday"), makeDateValue(2022, 9, 10, 0, 0, 0), + value::makeNewString("Saturday"), + }, + { + // 'startOfWeek' is not specified (should default to Sunday), unit is week. + value::makeNewString("UTC"), + makeDateValue(2022, 9, 12, 12, 24, 36), + value::makeNewString("week"), + makeLongValue(1), + makeDateValue(2022, 9, 11, 0, 0, 0), }, }; @@ -383,10 +379,13 @@ TEST_F(SBEDateTruncTest, BasicDateTrunc) { dateAccessor.reset(testCase.date.first, testCase.date.second); unitAccessor.reset(testCase.unit.first, testCase.unit.second); binSizeAccessor.reset(testCase.binSize.first, testCase.binSize.second); - startOfWeekAccessor.reset(testCase.startOfWeek.first, testCase.startOfWeek.second); + if (testCase.startOfWeek) { + startOfWeekAccessor.reset(testCase.startOfWeek->first, testCase.startOfWeek->second); + } // Execute the "dateTrunc" function. - auto result = runCompiledExpression(compiledDateTrunc.get()); + auto result = runCompiledExpression( + (testCase.startOfWeek ? 
compiledDateTruncWithStartOfWeek : compiledDateTrunc).get()); auto [resultTag, resultValue] = result; value::ValueGuard resultGuard(resultTag, resultValue); diff --git a/src/mongo/db/exec/sbe/sbe_test.cpp b/src/mongo/db/exec/sbe/sbe_test.cpp index 8a93c14bf28..c3ffba02394 100644 --- a/src/mongo/db/exec/sbe/sbe_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_test.cpp @@ -469,29 +469,29 @@ TEST(SBEVM, CodeFragmentToStringArgs) { std::string toStringPattern{kAddrPattern}; code.appendFillEmpty(vm::Instruction::Null); - toStringPattern += instrPattern("fillEmptyImm", "k: Null"); + toStringPattern += instrPattern("fillEmptyConst", "k: Null"); code.appendFillEmpty(vm::Instruction::False); - toStringPattern += instrPattern("fillEmptyImm", "k: False"); + toStringPattern += instrPattern("fillEmptyConst", "k: False"); code.appendFillEmpty(vm::Instruction::True); - toStringPattern += instrPattern("fillEmptyImm", "k: True"); + toStringPattern += instrPattern("fillEmptyConst", "k: True"); code.appendTraverseP(0xAA, vm::Instruction::Nothing); auto offsetP1 = 0xAA - code.instrs().size(); toStringPattern += - instrPattern("traversePImm", "k: Nothing, offset: " + std::to_string(offsetP1)); + instrPattern("traversePConst", "k: Nothing, offset: " + std::to_string(offsetP1)); code.appendTraverseP(0xAA, vm::Instruction::Int32One); auto offsetP2 = 0xAA - code.instrs().size(); toStringPattern += - instrPattern("traversePImm", "k: 1, offset: " + std::to_string(offsetP2)); + instrPattern("traversePConst", "k: 1, offset: " + std::to_string(offsetP2)); code.appendTraverseF(0xBB, vm::Instruction::True); auto offsetF = 0xBB - code.instrs().size(); toStringPattern += - instrPattern("traverseFImm", "k: True, offset: " + std::to_string(offsetF)); + instrPattern("traverseFConst", "k: True, offset: " + std::to_string(offsetF)); auto [tag, val] = value::makeNewString("Hello world!"); value::ValueGuard guard{tag, val}; code.appendGetField(tag, val); - toStringPattern += instrPattern("getFieldImm", "value: 
\"Hello world!\""); + toStringPattern += instrPattern("getFieldConst", "value: \"Hello world!\""); code.appendAdd(); toStringPattern += instrPattern("add", ""); diff --git a/src/mongo/db/exec/sbe/values/arith_common.cpp b/src/mongo/db/exec/sbe/values/arith_common.cpp index f7646f2c4f1..358dcdb65e7 100644 --- a/src/mongo/db/exec/sbe/values/arith_common.cpp +++ b/src/mongo/db/exec/sbe/values/arith_common.cpp @@ -236,7 +236,7 @@ FastTuple genericArithmeticOp(value::TypeTa } else { int64_t result; if (!Op::doOperation( - bitcastTo(lhsValue), bitcastTo(rhsValue), result)) { + bitcastTo(lhsValue), bitcastTo(lhsValue), result)) { return {false, value::TypeTags::NumberInt64, value::bitcastFrom(result)}; } } @@ -268,25 +268,4 @@ FastTuple genericMul(value::TypeTags lhsTag return genericArithmeticOp(lhsTag, lhsValue, rhsTag, rhsValue); } -FastTuple genericNumConvert(value::TypeTags lhsTag, - value::Value lhsValue, - value::TypeTags targetTag) { - if (value::isNumber(lhsTag)) { - switch (lhsTag) { - case value::TypeTags::NumberInt32: - return numericConvLossless(value::bitcastTo(lhsValue), targetTag); - case value::TypeTags::NumberInt64: - return numericConvLossless(value::bitcastTo(lhsValue), targetTag); - case value::TypeTags::NumberDouble: - return numericConvLossless(value::bitcastTo(lhsValue), targetTag); - case value::TypeTags::NumberDecimal: - return numericConvLossless(value::bitcastTo(lhsValue), - targetTag); - default: - MONGO_UNREACHABLE - } - } - return {false, value::TypeTags::Nothing, 0}; -} - } // namespace mongo::sbe::value diff --git a/src/mongo/db/exec/sbe/values/arith_common.h b/src/mongo/db/exec/sbe/values/arith_common.h index b14e6df49b4..4e7682d0a3b 100644 --- a/src/mongo/db/exec/sbe/values/arith_common.h +++ b/src/mongo/db/exec/sbe/values/arith_common.h @@ -44,7 +44,4 @@ FastTuple genericMul(value::TypeTags lhsTag value::Value lhsValue, value::TypeTags rhsTag, value::Value rhsValue); -FastTuple genericNumConvert(value::TypeTags lhsTag, - value::Value 
lhsValue, - value::TypeTags targetTag); } // namespace mongo::sbe::value diff --git a/src/mongo/db/exec/sbe/vm/arith.cpp b/src/mongo/db/exec/sbe/vm/arith.cpp index 1386ac832ab..a325d31f676 100644 --- a/src/mongo/db/exec/sbe/vm/arith.cpp +++ b/src/mongo/db/exec/sbe/vm/arith.cpp @@ -557,6 +557,26 @@ FastTuple ByteCode::genericMod(value::TypeT return {false, value::TypeTags::Nothing, 0}; } +FastTuple ByteCode::genericNumConvert( + value::TypeTags lhsTag, value::Value lhsValue, value::TypeTags targetTag) { + if (value::isNumber(lhsTag)) { + switch (lhsTag) { + case value::TypeTags::NumberInt32: + return numericConvLossless(value::bitcastTo(lhsValue), targetTag); + case value::TypeTags::NumberInt64: + return numericConvLossless(value::bitcastTo(lhsValue), targetTag); + case value::TypeTags::NumberDouble: + return numericConvLossless(value::bitcastTo(lhsValue), targetTag); + case value::TypeTags::NumberDecimal: + return numericConvLossless(value::bitcastTo(lhsValue), + targetTag); + default: + MONGO_UNREACHABLE + } + } + return {false, value::TypeTags::Nothing, 0}; +} + FastTuple ByteCode::genericAbs(value::TypeTags operandTag, value::Value operandValue) { switch (operandTag) { diff --git a/src/mongo/db/exec/sbe/vm/vm.cpp b/src/mongo/db/exec/sbe/vm/vm.cpp index 38a17246d7e..c1427ba6eee 100644 --- a/src/mongo/db/exec/sbe/vm/vm.cpp +++ b/src/mongo/db/exec/sbe/vm/vm.cpp @@ -108,16 +108,16 @@ int Instruction::stackOffset[Instruction::Tags::lastInstruction] = { -2, // collCmp3w -1, // fillEmpty - 0, // fillEmptyImm + 0, // fillEmptyConst -1, // getField - 0, // getFieldImm + 0, // getFieldConst -1, // getElement -1, // collComparisonKey -1, // getFieldOrElement -2, // traverseP - 0, // traversePImm + 0, // traversePConst -2, // traverseF - 0, // traverseFImm + 0, // traverseFConst -2, // setField 0, // getArraySize @@ -144,7 +144,6 @@ int Instruction::stackOffset[Instruction::Tags::lastInstruction] = { 0, // isMinKey 0, // isMaxKey 0, // isTimestamp - 0, // typeMatchImm 0, // 
function is special, the stack offset is encoded in the instruction itself 0, // functionSmall is special, the stack offset is encoded in the instruction itself @@ -157,7 +156,6 @@ int Instruction::stackOffset[Instruction::Tags::lastInstruction] = { -1, // fail 0, // applyClassicMatcher - 0, // dateTruncImm }; void ByteCode::growAndResize() noexcept { @@ -262,8 +260,8 @@ std::string CodeFragment::toString() const { break; } // Instructions with other kinds of arguments. - case Instruction::traversePImm: - case Instruction::traverseFImm: { + case Instruction::traversePConst: + case Instruction::traverseFConst: { auto k = readFromMemory(pcPointer); pcPointer += sizeof(k); auto offset = readFromMemory(pcPointer); @@ -271,13 +269,13 @@ std::string CodeFragment::toString() const { ss << "k: " << Instruction::toStringConstants(k) << ", offset: " << offset; break; } - case Instruction::fillEmptyImm: { + case Instruction::fillEmptyConst: { auto k = readFromMemory(pcPointer); pcPointer += sizeof(k); ss << "k: " << Instruction::toStringConstants(k); break; } - case Instruction::getFieldImm: + case Instruction::getFieldConst: case Instruction::pushConstVal: { auto tag = readFromMemory(pcPointer); pcPointer += sizeof(tag); @@ -305,12 +303,6 @@ std::string CodeFragment::toString() const { ss << "tag: " << tag; break; } - case Instruction::typeMatchImm: { - auto mask = readFromMemory(pcPointer); - pcPointer += sizeof(mask); - ss << "mask: " << mask; - break; - } case Instruction::function: case Instruction::functionSmall: { auto f = readFromMemory(pcPointer); @@ -326,21 +318,6 @@ std::string CodeFragment::toString() const { ss << "f: " << static_cast(f) << ", arity: " << arity; break; } - case Instruction::dateTruncImm: { - auto unit = readFromMemory(pcPointer); - pcPointer += sizeof(unit); - auto binSize = readFromMemory(pcPointer); - pcPointer += sizeof(binSize); - auto timezone = readFromMemory(pcPointer); - pcPointer += sizeof(timezone); - auto startOfWeek = 
readFromMemory(pcPointer); - pcPointer += sizeof(startOfWeek); - ss << "unit: " << static_cast(unit) << ", binSize: " << binSize - << ", timezoneTzInfo: " << static_cast(timezone.getTzInfo()) - << ", timezoneUtcOffset: " << timezone.getUtcOffset() - << ", startOfWeek: " << static_cast(startOfWeek); - break; - } default: ss << "unknown"; } @@ -536,7 +513,7 @@ void CodeFragment::appendSimpleInstruction(Instruction::Tags tag) { void CodeFragment::appendFillEmpty(Instruction::Constants k) { Instruction i; - i.tag = Instruction::fillEmptyImm; + i.tag = Instruction::fillEmptyConst; adjustStackSimple(i); auto offset = allocateSpace(sizeof(Instruction) + sizeof(k)); @@ -553,7 +530,7 @@ void CodeFragment::appendGetField(value::TypeTags tag, value::Value val) { invariant(value::isString(tag)); Instruction i; - i.tag = Instruction::getFieldImm; + i.tag = Instruction::getFieldConst; adjustStackSimple(i); auto offset = allocateSpace(sizeof(Instruction) + sizeof(tag) + sizeof(val)); @@ -653,7 +630,7 @@ void CodeFragment::appendIsRecordId() { void CodeFragment::appendTraverseP(int codePosition, Instruction::Constants k) { Instruction i; - i.tag = Instruction::traversePImm; + i.tag = Instruction::traversePConst; adjustStackSimple(i); auto size = sizeof(Instruction) + sizeof(codePosition) + sizeof(k); @@ -668,7 +645,7 @@ void CodeFragment::appendTraverseP(int codePosition, Instruction::Constants k) { void CodeFragment::appendTraverseF(int codePosition, Instruction::Constants k) { Instruction i; - i.tag = Instruction::traverseFImm; + i.tag = Instruction::traverseFConst; adjustStackSimple(i); auto size = sizeof(Instruction) + sizeof(codePosition) + sizeof(k); @@ -681,36 +658,6 @@ void CodeFragment::appendTraverseF(int codePosition, Instruction::Constants k) { offset += writeToMemory(offset, codeOffset); } -void CodeFragment::appendTypeMatch(uint32_t mask) { - Instruction i; - i.tag = Instruction::typeMatchImm; - adjustStackSimple(i); - - auto size = sizeof(Instruction) + 
sizeof(mask); - auto offset = allocateSpace(size); - - offset += writeToMemory(offset, i); - offset += writeToMemory(offset, mask); -} - -void CodeFragment::appendDateTrunc(TimeUnit unit, - int64_t binSize, - TimeZone timezone, - DayOfWeek startOfWeek) { - Instruction i; - i.tag = Instruction::dateTruncImm; - adjustStackSimple(i); - - auto offset = allocateSpace(sizeof(Instruction) + sizeof(unit) + sizeof(binSize) + - sizeof(timezone) + sizeof(startOfWeek)); - - offset += writeToMemory(offset, i); - offset += writeToMemory(offset, unit); - offset += writeToMemory(offset, binSize); - offset += writeToMemory(offset, timezone); - offset += writeToMemory(offset, startOfWeek); -} - void CodeFragment::appendFunction(Builtin f, ArityType arity) { Instruction i; const bool isSmallArity = (arity <= std::numeric_limits::max()); @@ -2710,7 +2657,7 @@ FastTuple ByteCode::builtinDateDiff(ArityTy } FastTuple ByteCode::builtinDateTrunc(ArityType arity) { - invariant(arity == 6); + invariant(arity == 5 || arity == 6); // 6th parameter is 'startOfWeek'. auto [timezoneDBOwn, timezoneDBTag, timezoneDBValue] = getFromStack(0); if (timezoneDBTag != value::TypeTags::timeZoneDB) { @@ -2720,6 +2667,10 @@ FastTuple ByteCode::builtinDateTrunc(ArityT // Get date. auto [dateOwn, dateTag, dateValue] = getFromStack(1); + if (!coercibleToDate(dateTag)) { + return {false, value::TypeTags::Nothing, 0}; + } + auto date = getDate(dateTag, dateValue); // Get unit. auto [unitOwn, unitTag, unitValue] = getFromStack(2); @@ -2756,33 +2707,20 @@ FastTuple ByteCode::builtinDateTrunc(ArityT // Get startOfWeek, if 'startOfWeek' parameter was passed and time unit is the week. 
DayOfWeek startOfWeek{kStartOfWeekDefault}; - if (TimeUnit::week == unit) { + if (6 == arity) { auto [startOfWeekOwn, startOfWeekTag, startOfWeekValue] = getFromStack(5); if (!value::isString(startOfWeekTag)) { return {false, value::TypeTags::Nothing, 0}; } - auto startOfWeekString = value::getStringView(startOfWeekTag, startOfWeekValue); - if (!isValidDayOfWeek(startOfWeekString)) { - return {false, value::TypeTags::Nothing, 0}; + if (TimeUnit::week == unit) { + auto startOfWeekString = value::getStringView(startOfWeekTag, startOfWeekValue); + if (!isValidDayOfWeek(startOfWeekString)) { + return {false, value::TypeTags::Nothing, 0}; + } + startOfWeek = parseDayOfWeek(startOfWeekString); } - startOfWeek = parseDayOfWeek(startOfWeekString); } - return dateTrunc(dateTag, dateValue, unit, binSize, timezone, startOfWeek); -} - -FastTuple ByteCode::dateTrunc(value::TypeTags dateTag, - value::Value dateValue, - TimeUnit unit, - int64_t binSize, - TimeZone timezone, - DayOfWeek startOfWeek) { - // Get date. 
- if (!coercibleToDate(dateTag)) { - return {false, value::TypeTags::Nothing, 0}; - } - auto date = getDate(dateTag, dateValue); - auto truncatedDate = truncateDate(date, unit, binSize, timezone, startOfWeek); return {false, value::TypeTags::Date, @@ -5383,7 +5321,7 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { } break; } - case Instruction::fillEmptyImm: { + case Instruction::fillEmptyConst: { auto k = readFromMemory(pcPointer); pcPointer += sizeof(k); @@ -5433,7 +5371,7 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { } break; } - case Instruction::getFieldImm: { + case Instruction::getFieldConst: { auto tagField = readFromMemory(pcPointer); pcPointer += sizeof(tagField); auto valField = readFromMemory(pcPointer); @@ -5529,7 +5467,7 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { traverseP(code); break; } - case Instruction::traversePImm: { + case Instruction::traversePConst: { auto k = readFromMemory(pcPointer); pcPointer += sizeof(k); @@ -5547,7 +5485,7 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { traverseF(code); break; } - case Instruction::traverseFImm: { + case Instruction::traverseFConst: { auto k = readFromMemory(pcPointer); pcPointer += sizeof(k); @@ -5795,21 +5733,6 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { runTagCheck(value::TypeTags::Timestamp); break; } - case Instruction::typeMatchImm: { - auto mask = readFromMemory(pcPointer); - pcPointer += sizeof(mask); - - auto [owned, tag, val] = getFromStack(0); - if (tag != value::TypeTags::Nothing) { - topStack(false, - value::TypeTags::Boolean, - value::bitcastFrom(getBSONTypeMask(tag) & mask)); - } - if (owned) { - value::releaseValue(tag, val); - } - break; - } case Instruction::function: case Instruction::functionSmall: { auto f = readFromMemory(pcPointer); @@ -5881,28 +5804,6 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { 
runClassicMatcher(matcher); break; } - case Instruction::dateTruncImm: { - auto unit = readFromMemory(pcPointer); - pcPointer += sizeof(unit); - auto binSize = readFromMemory(pcPointer); - pcPointer += sizeof(binSize); - auto timezone = readFromMemory(pcPointer); - pcPointer += sizeof(timezone); - auto startOfWeek = readFromMemory(pcPointer); - pcPointer += sizeof(startOfWeek); - - auto [dateOwned, dateTag, dateVal] = getFromStack(0); - - auto [owned, tag, val] = - dateTrunc(dateTag, dateVal, unit, binSize, timezone, startOfWeek); - - topStack(owned, tag, val); - - if (dateOwned) { - value::releaseValue(dateTag, dateVal); - } - break; - } default: MONGO_UNREACHABLE; } diff --git a/src/mongo/db/exec/sbe/vm/vm.h b/src/mongo/db/exec/sbe/vm/vm.h index 3b7b1501d99..3f6a959aaf9 100644 --- a/src/mongo/db/exec/sbe/vm/vm.h +++ b/src/mongo/db/exec/sbe/vm/vm.h @@ -274,16 +274,16 @@ struct Instruction { collCmp3w, fillEmpty, - fillEmptyImm, + fillEmptyConst, getField, - getFieldImm, + getFieldConst, getElement, collComparisonKey, getFieldOrElement, traverseP, // traverse projection paths - traversePImm, + traversePConst, traverseF, // traverse filter paths - traverseFImm, + traverseFConst, setField, getArraySize, // number of elements @@ -310,7 +310,6 @@ struct Instruction { isMinKey, isMaxKey, isTimestamp, - typeMatchImm, function, functionSmall, @@ -324,8 +323,6 @@ struct Instruction { applyClassicMatcher, // Instruction which calls into the classic engine MatchExpression. 
- dateTruncImm, - lastInstruction // this is just a marker used to calculate number of instructions }; @@ -425,12 +422,12 @@ struct Instruction { return "collCmp3w"; case fillEmpty: return "fillEmpty"; - case fillEmptyImm: - return "fillEmptyImm"; + case fillEmptyConst: + return "fillEmptyConst"; case getField: return "getField"; - case getFieldImm: - return "getFieldImm"; + case getFieldConst: + return "getFieldConst"; case getElement: return "getElement"; case collComparisonKey: @@ -439,12 +436,12 @@ struct Instruction { return "getFieldOrElement"; case traverseP: return "traverseP"; - case traversePImm: - return "traversePImm"; + case traversePConst: + return "traversePConst"; case traverseF: return "traverseF"; - case traverseFImm: - return "traverseFImm"; + case traverseFConst: + return "traverseFConst"; case setField: return "setField"; case getArraySize: @@ -491,8 +488,6 @@ struct Instruction { return "isMaxKey"; case isTimestamp: return "isTimestamp"; - case typeMatchImm: - return "typeMatchImm"; case function: return "function"; case functionSmall: @@ -509,8 +504,6 @@ struct Instruction { return "fail"; case applyClassicMatcher: return "applyClassicMatcher"; - case dateTruncImm: - return "dateTruncImm"; default: return "unrecognized"; } @@ -762,7 +755,6 @@ public: appendSimpleInstruction(Instruction::setField); } void appendGetArraySize(); - void appendDateTrunc(TimeUnit unit, int64_t binSize, TimeZone timezone, DayOfWeek startOfWeek); void appendSum(); void appendMin(); @@ -791,7 +783,6 @@ public: void appendIsTimestamp() { appendSimpleInstruction(Instruction::isTimestamp); } - void appendTypeMatch(uint32_t mask); void appendFunction(Builtin f, ArityType arity); void appendJump(int jumpOffset); void appendJumpTrue(int jumpOffset); @@ -930,6 +921,9 @@ private: value::Value rhsVal, value::TypeTags collTag, value::Value collVal); + FastTuple genericNumConvert(value::TypeTags lhsTag, + value::Value lhsValue, + value::TypeTags rhsTag); std::pair compare3way( 
value::TypeTags lhsTag, @@ -1068,12 +1062,6 @@ private: value::Value timezoneValue); FastTuple genericNewKeyString( ArityType arity, CollatorInterface* collator = nullptr); - FastTuple dateTrunc(value::TypeTags dateTag, - value::Value dateValue, - TimeUnit unit, - int64_t binSize, - TimeZone timezone, - DayOfWeek startOfWeek); FastTuple builtinSplit(ArityType arity); FastTuple builtinDate(ArityType arity); diff --git a/src/mongo/db/free_mon/free_mon_op_observer.h b/src/mongo/db/free_mon/free_mon_op_observer.h index 60f3d4ed5b7..f6284c113df 100644 --- a/src/mongo/db/free_mon/free_mon_op_observer.h +++ b/src/mongo/db/free_mon/free_mon_op_observer.h @@ -56,8 +56,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -217,6 +216,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/ftdc/SConscript b/src/mongo/db/ftdc/SConscript index 72fb13d52aa..5ffe0b7d1b6 100644 --- a/src/mongo/db/ftdc/SConscript +++ b/src/mongo/db/ftdc/SConscript @@ -55,7 +55,6 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/rpc/command_status', - '$BUILD_DIR/mongo/rpc/message', ], LIBDEPS_TAGS=[ 'lint-allow-non-alphabetic', @@ -77,7 +76,6 @@ env.Library( '$BUILD_DIR/mongo/db/auth/authprivilege', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/storage/storage_options', - '$BUILD_DIR/mongo/rpc/message', 'ftdc_server', ], ) diff --git a/src/mongo/db/global_index.cpp b/src/mongo/db/global_index.cpp index ed702f76aac..13852a11eda 100644 --- a/src/mongo/db/global_index.cpp +++ b/src/mongo/db/global_index.cpp @@ -52,6 +52,8 @@ namespace 
mongo::global_index { namespace { // Anonymous namespace for private functions. +constexpr StringData kIndexKeyIndexName = "ik_1"_sd; + // Build an index entry to insert. BSONObj buildIndexEntry(const BSONObj& key, const BSONObj& docKey) { // Generate the KeyString representation of the index key. @@ -70,14 +72,12 @@ BSONObj buildIndexEntry(const BSONObj& key, const BSONObj& docKey) { // - 'tb': the index key's TypeBits. Only present if non-zero. BSONObjBuilder indexEntryBuilder; - indexEntryBuilder.append(kContainerIndexDocKeyFieldName, docKey); + indexEntryBuilder.append("_id", docKey); indexEntryBuilder.append( - kContainerIndexKeyFieldName, - BSONBinData(ks.getBuffer(), ks.getSize(), BinDataType::BinDataGeneral)); + "ik", BSONBinData(ks.getBuffer(), ks.getSize(), BinDataType::BinDataGeneral)); if (!indexTB.isAllZeros()) { indexEntryBuilder.append( - kContainerIndexKeyTypeBitsFieldName, - BSONBinData(indexTB.getBuffer(), indexTB.getSize(), BinDataType::BinDataGeneral)); + "tb", BSONBinData(indexTB.getBuffer(), indexTB.getSize(), BinDataType::BinDataGeneral)); } return indexEntryBuilder.obj(); } @@ -120,9 +120,8 @@ void createContainer(OperationContext* opCtx, const UUID& indexUUID) { // Create the container. 
return writeConflictRetry(opCtx, "createGlobalIndexContainer", nss.ns(), [&]() { - const auto indexKeySpec = - BSON("v" << 2 << "name" << kContainerIndexKeyFieldName.toString() + "_1" - << "key" << BSON(kContainerIndexKeyFieldName << 1) << "unique" << true); + const auto indexKeySpec = BSON("v" << 2 << "name" << kIndexKeyIndexName << "key" + << BSON("ik" << 1) << "unique" << true); WriteUnitOfWork wuow(opCtx); @@ -154,15 +153,13 @@ void createContainer(OperationContext* opCtx, const UUID& indexUUID) { str::stream() << "Collection with UUID " << indexUUID << " already exists but it's not clustered.", autoColl->getCollectionOptions().clusteredIndex); - tassert(6789205, - str::stream() << "Collection with UUID " << indexUUID - << " already exists but it's missing a unique index on " - << kContainerIndexKeyFieldName << ".", - autoColl->getIndexCatalog()->findIndexByKeyPatternAndOptions( - opCtx, - BSON(kContainerIndexKeyFieldName << 1), - indexKeySpec, - IndexCatalog::InclusionPolicy::kReady)); + tassert( + 6789205, + str::stream() << "Collection with UUID " << indexUUID + << " already exists but it's missing a unique index on " + "'ik'.", + autoColl->getIndexCatalog()->findIndexByKeyPatternAndOptions( + opCtx, BSON("ik" << 1), indexKeySpec, IndexCatalog::InclusionPolicy::kReady)); tassert(6789206, str::stream() << "Collection with namespace " << nss.ns() << " already exists but it has inconsistent UUID " @@ -194,8 +191,6 @@ void dropContainer(OperationContext* opCtx, const UUID& indexUUID) { return; } - const auto numKeys = autoColl->numRecords(opCtx); - WriteUnitOfWork wuow(opCtx); { repl::UnreplicatedWritesBlock unreplicatedWrites(opCtx); @@ -203,7 +198,7 @@ void dropContainer(OperationContext* opCtx, const UUID& indexUUID) { } auto opObserver = opCtx->getServiceContext()->getOpObserver(); - opObserver->onDropGlobalIndex(opCtx, nss, indexUUID, numKeys); + opObserver->onDropGlobalIndex(opCtx, nss, indexUUID); wuow.commit(); return; diff --git 
a/src/mongo/db/global_index.h b/src/mongo/db/global_index.h index b93fb439547..73d82da4a03 100644 --- a/src/mongo/db/global_index.h +++ b/src/mongo/db/global_index.h @@ -35,17 +35,6 @@ namespace mongo::global_index { -// The container (collection) fields of an index key. The document key is stored as a BSON object. -// The index key is stored in its KeyString representation, hence the need to store TypeBits. -constexpr auto kContainerIndexDocKeyFieldName = "_id"_sd; -constexpr auto kContainerIndexKeyFieldName = "ik"_sd; -constexpr auto kContainerIndexKeyTypeBitsFieldName = "tb"_sd; - -// The oplog entry fields representing an insert or delete of an index key. The index key and the -// document key are BSON objects. -constexpr auto kOplogEntryDocKeyFieldName = "dk"_sd; -constexpr auto kOplogEntryIndexKeyFieldName = kContainerIndexKeyFieldName; - /** * Creates the internal collection implements the global index container with the given UUID on the * shard. Replicates as a 'createGlobalIndex' command. This container-backing collection: diff --git a/src/mongo/db/global_index_test.cpp b/src/mongo/db/global_index_test.cpp index 1730d272b43..83cb975c990 100644 --- a/src/mongo/db/global_index_test.cpp +++ b/src/mongo/db/global_index_test.cpp @@ -98,21 +98,16 @@ void verifyStoredKeyMatchesIndexKey(const BSONObj& key, // 'tb' field stores the BinData(TypeBits(key)). The 'tb' field is not present if there are // no TypeBits. 
- auto entryIndexKeySize = indexEntry[global_index::kContainerIndexKeyFieldName].size(); - const auto entryIndexKeyBinData = - indexEntry[global_index::kContainerIndexKeyFieldName].binData(entryIndexKeySize); + auto entryIndexKeySize = indexEntry["ik"].size(); + const auto entryIndexKeyBinData = indexEntry["ik"].binData(entryIndexKeySize); - const auto hasTypeBits = - indexEntry.hasElement(global_index::kContainerIndexKeyTypeBitsFieldName); + const auto hasTypeBits = indexEntry.hasElement("tb"); ASSERT_EQ(expectTypeBits, hasTypeBits); auto tb = KeyString::TypeBits(KeyString::Version::V1); if (hasTypeBits) { - auto entryTypeBitsSize = - indexEntry[global_index::kContainerIndexKeyTypeBitsFieldName].size(); - auto entryTypeBitsBinData = - indexEntry[global_index::kContainerIndexKeyTypeBitsFieldName].binData( - entryTypeBitsSize); + auto entryTypeBitsSize = indexEntry["tb"].size(); + auto entryTypeBitsBinData = indexEntry["tb"].binData(entryTypeBitsSize); auto entryTypeBitsReader = BufReader(entryTypeBitsBinData, entryTypeBitsSize); tb = KeyString::TypeBits::fromBuffer(KeyString::Version::V1, &entryTypeBitsReader); ASSERT(!tb.isAllZeros()); @@ -139,14 +134,12 @@ TEST_F(GlobalIndexTest, StorageFormat) { const auto key = BSON("" << "hola"); const auto docKey = BSON("shk0" << 0 << "shk1" << 0 << "_id" << 0); - const auto entryId = BSON(global_index::kContainerIndexDocKeyFieldName << docKey); + const auto entryId = BSON("_id" << docKey); global_index::insertKey(operationContext(), uuid, key, docKey); // Validate that the document key is stored in the index entry's _id field. 
- StatusWith status = - storageInterface()->findById(operationContext(), - NamespaceString::makeGlobalIndexNSS(uuid), - entryId[global_index::kContainerIndexDocKeyFieldName]); + StatusWith status = storageInterface()->findById( + operationContext(), NamespaceString::makeGlobalIndexNSS(uuid), entryId["_id"]); ASSERT_OK(status.getStatus()); const auto indexEntry = status.getValue(); @@ -160,14 +153,12 @@ TEST_F(GlobalIndexTest, StorageFormat) { << "hola" << "" << 1); const auto docKey = BSON("shk0" << 1 << "shk1" << 1 << "_id" << 1); - const auto entryId = BSON(global_index::kContainerIndexDocKeyFieldName << docKey); + const auto entryId = BSON("_id" << docKey); global_index::insertKey(operationContext(), uuid, key, docKey); // Validate that the document key is stored in the index entry's _id field. - StatusWith status = - storageInterface()->findById(operationContext(), - NamespaceString::makeGlobalIndexNSS(uuid), - entryId[global_index::kContainerIndexDocKeyFieldName]); + StatusWith status = storageInterface()->findById( + operationContext(), NamespaceString::makeGlobalIndexNSS(uuid), entryId["_id"]); ASSERT_OK(status.getStatus()); const auto indexEntry = status.getValue(); @@ -181,14 +172,12 @@ TEST_F(GlobalIndexTest, StorageFormat) { << "hola" << "" << 2LL); const auto docKey = BSON("shk0" << 2 << "shk1" << 2 << "_id" << 2); - const auto entryId = BSON(global_index::kContainerIndexDocKeyFieldName << docKey); + const auto entryId = BSON("_id" << docKey); global_index::insertKey(operationContext(), uuid, key, docKey); // Validate that the document key is stored in the index entry's _id field. 
- StatusWith status = - storageInterface()->findById(operationContext(), - NamespaceString::makeGlobalIndexNSS(uuid), - entryId[global_index::kContainerIndexDocKeyFieldName]); + StatusWith status = storageInterface()->findById( + operationContext(), NamespaceString::makeGlobalIndexNSS(uuid), entryId["_id"]); ASSERT_OK(status.getStatus()); const auto indexEntry = status.getValue(); @@ -202,14 +191,12 @@ TEST_F(GlobalIndexTest, StorageFormat) { << "hola" << "" << 3.0); const auto docKey = BSON("shk0" << 2 << "shk1" << 3 << "_id" << 3); - const auto entryId = BSON(global_index::kContainerIndexDocKeyFieldName << docKey); + const auto entryId = BSON("_id" << docKey); global_index::insertKey(operationContext(), uuid, key, docKey); // Validate that the document key is stored in the index entry's _id field. - StatusWith status = - storageInterface()->findById(operationContext(), - NamespaceString::makeGlobalIndexNSS(uuid), - entryId[global_index::kContainerIndexDocKeyFieldName]); + StatusWith status = storageInterface()->findById( + operationContext(), NamespaceString::makeGlobalIndexNSS(uuid), entryId["_id"]); ASSERT_OK(status.getStatus()); const auto indexEntry = status.getValue(); @@ -271,7 +258,7 @@ TEST_F(GlobalIndexTest, DeleteKey) { const auto insertAndVerifyDelete = [this](const UUID& uuid, const BSONObj& key, const BSONObj& docKey) { - const auto entryId = BSON(global_index::kContainerIndexDocKeyFieldName << docKey); + const auto entryId = BSON("_id" << docKey); const auto nss = NamespaceString::makeGlobalIndexNSS(uuid); // Inserts already tested in StorageFormat case. @@ -279,8 +266,7 @@ TEST_F(GlobalIndexTest, DeleteKey) { // Delete and validate that the key is not found. 
global_index::deleteKey(operationContext(), uuid, key, docKey); - ASSERT_NOT_OK(storageInterface()->findById( - operationContext(), nss, entryId[global_index::kContainerIndexDocKeyFieldName])); + ASSERT_NOT_OK(storageInterface()->findById(operationContext(), nss, entryId["_id"])); }; const auto docKey = BSON("shk0" << 0 << "shk1" << 0 << "_id" << 0); @@ -346,7 +332,7 @@ void _assertDocumentsInGlobalIndexById(OperationContext* opCtx, BSONObj obj; for (auto& id : ids) { ASSERT_EQUALS(exec->getNext(&obj, nullptr), PlanExecutor::ADVANCED); - ASSERT_BSONOBJ_EQ(id, obj.getObjectField(global_index::kContainerIndexDocKeyFieldName)); + ASSERT_BSONOBJ_EQ(id, obj.getObjectField("_id")); } ASSERT_EQUALS(exec->getNext(&obj, nullptr), PlanExecutor::IS_EOF); } diff --git a/src/mongo/db/index/SConscript b/src/mongo/db/index/SConscript index d4fc11138c6..32a10b4eee7 100644 --- a/src/mongo/db/index/SConscript +++ b/src/mongo/db/index/SConscript @@ -154,7 +154,6 @@ env.Library( '$BUILD_DIR/mongo/db/fts/base_fts', '$BUILD_DIR/mongo/db/resumable_index_builds_idl', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/storage/execution_context', 'column_store_index', 'expression_params', 'key_generator', diff --git a/src/mongo/db/index/columns_access_method.cpp b/src/mongo/db/index/columns_access_method.cpp index 55e6c9ad03a..ef03980b917 100644 --- a/src/mongo/db/index/columns_access_method.cpp +++ b/src/mongo/db/index/columns_access_method.cpp @@ -41,9 +41,7 @@ #include "mongo/db/index/column_cell.h" #include "mongo/db/index/column_key_generator.h" #include "mongo/db/index/column_store_sorter.h" -#include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/storage/execution_context.h" #include "mongo/logv2/log.h" #include "mongo/util/progress_meter.h" @@ -56,11 +54,6 @@ inline void inc(int64_t* counter) { if (counter) ++*counter; }; - -inline void dec(int64_t* counter) { - if (counter) - --*counter; -}; } // namespace 
ColumnStoreAccessMethod::ColumnStoreAccessMethod(IndexCatalogEntry* ice, @@ -267,26 +260,6 @@ Status ColumnStoreAccessMethod::BulkBuilder::keyCommitted( return Status::OK(); } - -void ColumnStoreAccessMethod::_visitCellsForIndexInsert( - OperationContext* opCtx, - PooledFragmentBuilder& buf, - const std::vector& bsonRecords, - function_ref cb) const { - _keyGen.visitCellsForInsert( - bsonRecords, - [&](StringData path, const BsonRecord& rec, const column_keygen::UnencodedCellView& cell) { - if (!rec.ts.isNull()) { - uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(rec.ts)); - } - buf.reset(); - column_keygen::writeEncodedCell(cell, &buf); - tassert( - 6597800, "RecordID cannot be a string for column store indexes", !rec.id.isStr()); - cb(path, rec); - }); -} - Status ColumnStoreAccessMethod::insert(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, @@ -295,33 +268,24 @@ Status ColumnStoreAccessMethod::insert(OperationContext* opCtx, int64_t* keysInsertedOut) { try { PooledFragmentBuilder buf(pooledBufferBuilder); - // We cannot write to the index during its initial build phase, so we defer this insert as a - // "side write" to be applied after the build completes. 
- if (_indexCatalogEntry->isHybridBuilding()) { - auto columnKeys = StorageExecutionContext::get(opCtx).columnKeys(); - _visitCellsForIndexInsert( - opCtx, buf, bsonRecords, [&](StringData path, const BsonRecord& rec) { - columnKeys->emplace_back( - path.toString(), CellView{buf.buf(), size_t(buf.len())}.toString(), rec.id); - }); - int64_t inserted = 0; - ON_BLOCK_EXIT([keysInsertedOut, inserted] { - if (keysInsertedOut) { - *keysInsertedOut += inserted; + auto cursor = _store->newWriteCursor(opCtx); + _keyGen.visitCellsForInsert( + bsonRecords, + [&](StringData path, + const BsonRecord& rec, + const column_keygen::UnencodedCellView& cell) { + if (!rec.ts.isNull()) { + uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(rec.ts)); } + + buf.reset(); + column_keygen::writeEncodedCell(cell, &buf); + invariant(!rec.id.isStr()); + cursor->insert(path, rec.id.getLong(), CellView{buf.buf(), size_t(buf.len())}); + + inc(keysInsertedOut); }); - uassertStatusOK(_indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnKeys, IndexBuildInterceptor::Op::kInsert, &inserted)); - return Status::OK(); - } else { - auto cursor = _store->newWriteCursor(opCtx); - _visitCellsForIndexInsert( - opCtx, buf, bsonRecords, [&](StringData path, const BsonRecord& rec) { - cursor->insert(path, rec.id.getLong(), CellView{buf.buf(), size_t(buf.len())}); - inc(keysInsertedOut); - }); - return Status::OK(); - } + return Status::OK(); } catch (const AssertionException& ex) { return ex.toStatus(); } @@ -336,26 +300,12 @@ void ColumnStoreAccessMethod::remove(OperationContext* opCtx, const InsertDeleteOptions& options, int64_t* keysDeletedOut, CheckRecordId checkRecordId) { - if (_indexCatalogEntry->isHybridBuilding()) { - auto columnKeys = StorageExecutionContext::get(opCtx).columnKeys(); - _keyGen.visitPathsForDelete(obj, [&](StringData path) { - columnKeys->emplace_back(std::make_tuple(path.toString(), "", rid)); - }); - int64_t removed = 0; - fassert(6597801, - 
_indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnKeys, IndexBuildInterceptor::Op::kDelete, &removed)); - if (keysDeletedOut) { - *keysDeletedOut += removed; - } - } else { - auto cursor = _store->newWriteCursor(opCtx); - _keyGen.visitPathsForDelete(obj, [&](PathView path) { - tassert(6762301, "RecordID cannot be a string for column store indexes", !rid.isStr()); - cursor->remove(path, rid.getLong()); - inc(keysDeletedOut); - }); - } + auto cursor = _store->newWriteCursor(opCtx); + _keyGen.visitPathsForDelete(obj, [&](StringData path) { + tassert(6762301, "RecordID cannot be a string for column store indexes", !rid.isStr()); + cursor->remove(path, rid.getLong()); + inc(keysDeletedOut); + }); } Status ColumnStoreAccessMethod::update(OperationContext* opCtx, @@ -368,88 +318,37 @@ Status ColumnStoreAccessMethod::update(OperationContext* opCtx, int64_t* keysInsertedOut, int64_t* keysDeletedOut) { PooledFragmentBuilder buf(pooledBufferBuilder); + auto cursor = _store->newWriteCursor(opCtx); + _keyGen.visitDiffForUpdate( + oldDoc, + newDoc, + [&](column_keygen::ColumnKeyGenerator::DiffAction diffAction, + StringData path, + const column_keygen::UnencodedCellView* cell) { + if (diffAction == column_keygen::ColumnKeyGenerator::DiffAction::kDelete) { + tassert( + 6762302, "RecordID cannot be a string for column store indexes", !rid.isStr()); + cursor->remove(path, rid.getLong()); + inc(keysDeletedOut); + return; + } - if (_indexCatalogEntry->isHybridBuilding()) { - auto columnKeys = StorageExecutionContext::get(opCtx).columnKeys(); - _keyGen.visitDiffForUpdate( - oldDoc, - newDoc, - [&](column_keygen::ColumnKeyGenerator::DiffAction diffAction, - StringData path, - const column_keygen::UnencodedCellView* cell) { - if (diffAction == column_keygen::ColumnKeyGenerator::DiffAction::kDelete) { - columnKeys->emplace_back(std::make_tuple(path.toString(), "", rid)); - int64_t removed = 0; - fassert(6597802, - 
_indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnKeys, IndexBuildInterceptor::Op::kDelete, &removed)); - - if (keysDeletedOut) { - *keysDeletedOut += removed; - } - return; - } - - // kInsert and kUpdate are handled almost identically. If we switch to using - // `overwrite=true` cursors in WT, we could consider making them the same, - // although that might disadvantage other implementations of the storage engine - // API. - buf.reset(); - column_keygen::writeEncodedCell(*cell, &buf); - - const auto method = - diffAction == column_keygen::ColumnKeyGenerator::DiffAction::kInsert - ? IndexBuildInterceptor::Op::kInsert - : IndexBuildInterceptor::Op::kUpdate; - - columnKeys->emplace_back(std::make_tuple( - path.toString(), CellView{buf.buf(), size_t(buf.len())}.toString(), rid)); - - int64_t inserted = 0; - Status status = _indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnKeys, method, &inserted); - if (keysInsertedOut) { - *keysInsertedOut += inserted; - } - }); - - } else { - auto cursor = _store->newWriteCursor(opCtx); - _keyGen.visitDiffForUpdate( - oldDoc, - newDoc, - [&](column_keygen::ColumnKeyGenerator::DiffAction diffAction, - StringData path, - const column_keygen::UnencodedCellView* cell) { - if (diffAction == column_keygen::ColumnKeyGenerator::DiffAction::kDelete) { - tassert(6762302, - "RecordID cannot be a string for column store indexes", - !rid.isStr()); - cursor->remove(path, rid.getLong()); - inc(keysDeletedOut); - return; - } - - // kInsert and kUpdate are handled almost identically. If we switch to using - // `overwrite=true` cursors in WT, we could consider making them the same, although - // that might disadvantage other implementations of the storage engine API. - buf.reset(); - column_keygen::writeEncodedCell(*cell, &buf); + // kInsert and kUpdate are handled almost identically. 
If we switch to using + // `overwrite=true` cursors in WT, we could consider making them the same, although that + // might disadvantage other implementations of the storage engine API. + buf.reset(); + column_keygen::writeEncodedCell(*cell, &buf); - const auto method = - diffAction == column_keygen::ColumnKeyGenerator::DiffAction::kInsert - ? &ColumnStore::WriteCursor::insert - : &ColumnStore::WriteCursor::update; - tassert( - 6762303, "RecordID cannot be a string for column store indexes", !rid.isStr()); - (cursor.get()->*method)( - path, rid.getLong(), CellView{buf.buf(), size_t(buf.len())}); + const auto method = diffAction == column_keygen::ColumnKeyGenerator::DiffAction::kInsert + ? &ColumnStore::WriteCursor::insert + : &ColumnStore::WriteCursor::update; + tassert(6762303, "RecordID cannot be a string for column store indexes", !rid.isStr()); + (cursor.get()->*method)(path, rid.getLong(), CellView{buf.buf(), size_t(buf.len())}); - inc(keysInsertedOut); - }); - } + inc(keysInsertedOut); + }); return Status::OK(); -} // namespace mongo +} Status ColumnStoreAccessMethod::initializeAsEmpty(OperationContext* opCtx) { return Status::OK(); @@ -484,9 +383,8 @@ std::unique_ptr ColumnStoreAccessMethod::initiat size_t maxMemoryUsageBytes, const boost::optional& stateInfo, StringData dbName) { - return (stateInfo && stateInfo->getFileName()) - ? std::make_unique(this, maxMemoryUsageBytes, *stateInfo, dbName) - : std::make_unique(this, maxMemoryUsageBytes, dbName); + return stateInfo ? 
std::make_unique(this, maxMemoryUsageBytes, *stateInfo, dbName) + : std::make_unique(this, maxMemoryUsageBytes, dbName); } std::shared_ptr ColumnStoreAccessMethod::getSharedIdent() const { @@ -497,41 +395,4 @@ void ColumnStoreAccessMethod::setIdent(std::shared_ptr ident) { _store->setIdent(std::move(ident)); } -void ColumnStoreAccessMethod::applyColumnDataSideWrite(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& operation, - int64_t* keysInserted, - int64_t* keysDeleted) { - const IndexBuildInterceptor::Op opType = operation.getStringField("op") == "i"_sd - ? IndexBuildInterceptor::Op::kInsert - : operation.getStringField("op") == "d"_sd ? IndexBuildInterceptor::Op::kDelete - : IndexBuildInterceptor::Op::kUpdate; - - RecordId rid = RecordId::deserializeToken(operation.getField("rid")); - - CellView cell = operation.getStringField("cell"); - PathView path = operation.getStringField("path"); - - auto cursor = _store->newWriteCursor(opCtx); - - tassert(6597803, "RecordID cannot be a string for column store indexes", !rid.isStr()); - switch (opType) { - case IndexBuildInterceptor::Op::kInsert: - cursor->insert(path, rid.getLong(), cell); - inc(keysInserted); - opCtx->recoveryUnit()->onRollback([keysInserted] { dec(keysInserted); }); - break; - case IndexBuildInterceptor::Op::kDelete: - cursor->remove(path, rid.getLong()); - inc(keysDeleted); - opCtx->recoveryUnit()->onRollback([keysDeleted] { dec(keysDeleted); }); - break; - case IndexBuildInterceptor::Op::kUpdate: - cursor->update(path, rid.getLong(), cell); - inc(keysInserted); - opCtx->recoveryUnit()->onRollback([keysInserted] { dec(keysInserted); }); - break; - } -} - } // namespace mongo diff --git a/src/mongo/db/index/columns_access_method.h b/src/mongo/db/index/columns_access_method.h index 44848fa114c..fefa5240468 100644 --- a/src/mongo/db/index/columns_access_method.h +++ b/src/mongo/db/index/columns_access_method.h @@ -75,7 +75,6 @@ public: const InsertDeleteOptions& options, int64_t* 
keysDeletedOut, CheckRecordId checkRecordId) final; - Status update(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& oldDoc, @@ -86,12 +85,6 @@ public: int64_t* keysInsertedOut, int64_t* keysDeletedOut) final; - void applyColumnDataSideWrite(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& operation, - int64_t* keysInserted, - int64_t* keysDeleted) final; - Status initializeAsEmpty(OperationContext* opCtx) final; void validate(OperationContext* opCtx, @@ -124,11 +117,6 @@ public: class BulkBuilder; private: - void _visitCellsForIndexInsert(OperationContext* opCtx, - PooledFragmentBuilder& pooledFragmentBuilder, - const std::vector& bsonRecords, - function_ref cb) const; - const std::unique_ptr _store; IndexCatalogEntry* const _indexCatalogEntry; // owned by IndexCatalog const IndexDescriptor* const _descriptor; diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp index a3ef4649369..db5a1974aa1 100644 --- a/src/mongo/db/index/index_access_method.cpp +++ b/src/mongo/db/index/index_access_method.cpp @@ -620,66 +620,6 @@ void SortedDataIndexAccessMethod::setIdent(std::shared_ptr newIdent) { this->_newInterface->setIdent(std::move(newIdent)); } -Status SortedDataIndexAccessMethod::applySortedDataSideWrite(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& operation, - const InsertDeleteOptions& options, - KeyHandlerFn&& onDuplicateKey, - int64_t* const keysInserted, - int64_t* const keysDeleted) { - auto opType = [&operation] { - switch (operation.getStringField("op")[0]) { - case 'i': - return IndexBuildInterceptor::Op::kInsert; - case 'd': - return IndexBuildInterceptor::Op::kDelete; - case 'u': - return IndexBuildInterceptor::Op::kUpdate; - default: - MONGO_UNREACHABLE; - } - }(); - - // Deserialize the encoded KeyString::Value. 
- int keyLen; - const char* binKey = operation["key"].binData(keyLen); - BufReader reader(binKey, keyLen); - const KeyString::Value keyString = - KeyString::Value::deserialize(reader, getSortedDataInterface()->getKeyStringVersion()); - - const KeyStringSet keySet{keyString}; - if (opType == IndexBuildInterceptor::Op::kInsert) { - int64_t numInserted; - auto status = insertKeysAndUpdateMultikeyPaths(opCtx, - coll, - {keySet.begin(), keySet.end()}, - {}, - MultikeyPaths{}, - options, - std::move(onDuplicateKey), - &numInserted); - if (!status.isOK()) { - return status; - } - - *keysInserted += numInserted; - opCtx->recoveryUnit()->onRollback( - [keysInserted, numInserted] { *keysInserted -= numInserted; }); - } else { - invariant(opType == IndexBuildInterceptor::Op::kDelete); - int64_t numDeleted; - Status s = removeKeys(opCtx, {keySet.begin(), keySet.end()}, options, &numDeleted); - if (!s.isOK()) { - return s; - } - - *keysDeleted += numDeleted; - opCtx->recoveryUnit()->onRollback( - [keysDeleted, numDeleted] { *keysDeleted -= numDeleted; }); - } - return Status::OK(); -} - void IndexAccessMethod::BulkBuilder::countNewBuildInStats() { indexBulkBuilderSSS.count.addAndFetch(1); } @@ -1075,7 +1015,7 @@ void SortedDataIndexAccessMethod::getKeys(OperationContext* opCtx, multikeyPaths->clear(); } - if (!opCtx->checkForInterruptNoAssert().isOK()) { + if (opCtx->isKillPending()) { throw; } @@ -1266,6 +1206,7 @@ void SortedDataIndexAccessMethod::_unindexKeysOrWriteToSideTable( if (!status.isOK()) { LOGV2(20362, + "Couldn't unindex record {obj} from collection {namespace}: {error}", "Couldn't unindex record", "record"_attr = redact(obj), "namespace"_attr = ns, diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h index 40e012aa238..8c565152b67 100644 --- a/src/mongo/db/index/index_access_method.h +++ b/src/mongo/db/index/index_access_method.h @@ -174,24 +174,6 @@ public: */ virtual void setIdent(std::shared_ptr newIdent) = 0; - 
virtual Status applySortedDataSideWrite(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& operation, - const InsertDeleteOptions& options, - KeyHandlerFn&& onDuplicateKey, - int64_t* const keysInserted, - int64_t* const keysDeleted) { - MONGO_UNREACHABLE; - }; - - virtual void applyColumnDataSideWrite(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& operation, - int64_t* keysInserted, - int64_t* keysDeleted) { - MONGO_UNREACHABLE; - }; - // // Bulk operations support // @@ -579,14 +561,6 @@ public: void setIdent(std::shared_ptr newIdent) final; - Status applySortedDataSideWrite(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& operation, - const InsertDeleteOptions& options, - KeyHandlerFn&& onDuplicateKey, - int64_t* keysInserted, - int64_t* keysDeleted) final; - std::unique_ptr initiateBulk(size_t maxMemoryUsageBytes, const boost::optional& stateInfo, StringData dbName) final; diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp index 872c5301d85..36072c25bf5 100644 --- a/src/mongo/db/index/index_build_interceptor.cpp +++ b/src/mongo/db/index/index_build_interceptor.cpp @@ -39,7 +39,6 @@ #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/curop.h" #include "mongo/db/db_raii.h" -#include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_build_interceptor_gen.h" #include "mongo/db/multi_key_path_tracker.h" @@ -292,25 +291,56 @@ Status IndexBuildInterceptor::_applyWrite(OperationContext* opCtx, TrackDuplicates trackDups, int64_t* const keysInserted, int64_t* const keysDeleted) { - // Check field for "key" to determine if collection is sorted data or column store. - if (operation.hasField("key")) { - return _indexCatalogEntry->accessMethod()->applySortedDataSideWrite( + // Deserialize the encoded KeyString::Value. 
+ int keyLen; + const char* binKey = operation["key"].binData(keyLen); + BufReader reader(binKey, keyLen); + auto accessMethod = _indexCatalogEntry->accessMethod()->asSortedData(); + const KeyString::Value keyString = KeyString::Value::deserialize( + reader, accessMethod->getSortedDataInterface()->getKeyStringVersion()); + + const Op opType = operation.getStringField("op") == "i"_sd ? Op::kInsert : Op::kDelete; + + const KeyStringSet keySet{keyString}; + if (opType == Op::kInsert) { + int64_t numInserted; + auto status = accessMethod->insertKeysAndUpdateMultikeyPaths( opCtx, coll, - operation, + {keySet.begin(), keySet.end()}, + {}, + MultikeyPaths{}, options, [=](const KeyString::Value& duplicateKey) { return trackDups == TrackDuplicates::kTrack ? recordDuplicateKey(opCtx, duplicateKey) : Status::OK(); }, - keysInserted, - keysDeleted); + &numInserted); + if (!status.isOK()) { + return status; + } + + *keysInserted += numInserted; + opCtx->recoveryUnit()->onRollback( + [keysInserted, numInserted] { *keysInserted -= numInserted; }); } else { - _indexCatalogEntry->accessMethod()->applyColumnDataSideWrite( - opCtx, coll, operation, keysInserted, keysDeleted); - return Status::OK(); + invariant(opType == Op::kDelete); + if (kDebugBuild) + invariant(operation.getStringField("op") == "d"_sd); + + int64_t numDeleted; + Status s = + accessMethod->removeKeys(opCtx, {keySet.begin(), keySet.end()}, options, &numDeleted); + if (!s.isOK()) { + return s; + } + + *keysDeleted += numDeleted; + opCtx->recoveryUnit()->onRollback( + [keysDeleted, numDeleted] { *keysDeleted -= numDeleted; }); } + return Status::OK(); } void IndexBuildInterceptor::_yield(OperationContext* opCtx, const Yieldable* yieldable) { @@ -392,35 +422,6 @@ boost::optional IndexBuildInterceptor::getMultikeyPaths() const { return _multikeyPaths; } -Status IndexBuildInterceptor::_finishSideWrite(OperationContext* opCtx, - const std::vector& toInsert) { - _sideWritesCounter->fetchAndAdd(toInsert.size()); - // This 
insert may roll back, but not necessarily from inserting into this table. If other write - // operations outside this table and in the same transaction are rolled back, this counter also - // needs to be rolled back. - opCtx->recoveryUnit()->onRollback([sharedCounter = _sideWritesCounter, size = toInsert.size()] { - sharedCounter->fetchAndSubtract(size); - }); - - std::vector records; - for (auto& doc : toInsert) { - records.emplace_back(Record{RecordId(), // The storage engine will assign its own RecordId - // when we pass one that is null. - RecordData(doc.objdata(), doc.objsize())}); - } - - LOGV2_DEBUG(20691, - 2, - "Recording side write keys on index", - "numRecords"_attr = records.size(), - "index"_attr = _indexCatalogEntry->descriptor()->indexName()); - - // By passing a vector of null timestamps, these inserts are not timestamped individually, but - // rather with the timestamp of the owning operation. - std::vector timestamps(records.size()); - return _sideWritesTable->rs()->insertRecords(opCtx, &records, timestamps); -} - Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, @@ -428,7 +429,6 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, Op op, int64_t* const numKeysOut) { invariant(opCtx->lockState()->inAWriteUnitOfWork()); - invariant(op != IndexBuildInterceptor::Op::kUpdate); // Maintain parity with IndexAccessMethods handling of key counting. Only include // `multikeyMetadataKeys` when inserting. @@ -478,9 +478,9 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, } if (op == Op::kInsert) { - // Wildcard indexes write multikey path information, typically part of the catalog document, - // to the index itself. Multikey information is never deleted, so we only need to add this - // data on the insert path. + // Wildcard indexes write multikey path information, typically part of the catalog + // document, to the index itself. 
Multikey information is never deleted, so we only need + // to add this data on the insert path. for (const auto& keyString : multikeyMetadataKeys) { builder.reset(); keyString.serialize(builder); @@ -491,41 +491,33 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, } } - return _finishSideWrite(opCtx, std::move(toInsert)); -} - -Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, - const PathCellSet& keys, - Op op, - int64_t* const numKeysOut) { - invariant(opCtx->lockState()->inAWriteUnitOfWork()); - - *numKeysOut = keys.size(); - - std::vector toInsert; - toInsert.reserve(keys.size()); - for (const auto& [path, cell, rid] : keys) { - - BSONObjBuilder builder; - rid.serializeToken("rid", &builder); - builder.append("op", [op] { - switch (op) { - case Op::kInsert: - return "i"; - case Op::kDelete: - return "d"; - case Op::kUpdate: - return "u"; - } - MONGO_UNREACHABLE; - }()); - builder.append("path", path); - builder.append("cell", cell); + _sideWritesCounter->fetchAndAdd(toInsert.size()); + // This insert may roll back, but not necessarily from inserting into this table. If other write + // operations outside this table and in the same transaction are rolled back, this counter also + // needs to be rolled back. + opCtx->recoveryUnit()->onRollback([sharedCounter = _sideWritesCounter, size = toInsert.size()] { + sharedCounter->fetchAndSubtract(size); + }); - toInsert.push_back(builder.obj()); + std::vector records; + for (auto& doc : toInsert) { + records.emplace_back(Record{RecordId(), // The storage engine will assign its own RecordId + // when we pass one that is null. 
+ RecordData(doc.objdata(), doc.objsize())}); } - return _finishSideWrite(opCtx, std::move(toInsert)); + LOGV2_DEBUG(20691, + 2, + "recording {records_size} side write keys on index " + "'{indexCatalogEntry_descriptor_indexName}'", + "records_size"_attr = records.size(), + "indexCatalogEntry_descriptor_indexName"_attr = + _indexCatalogEntry->descriptor()->indexName()); + + // By passing a vector of null timestamps, these inserts are not timestamped individually, but + // rather with the timestamp of the owning operation. + std::vector timestamps(records.size()); + return _sideWritesTable->rs()->insertRecords(opCtx, &records, timestamps); } Status IndexBuildInterceptor::retrySkippedRecords(OperationContext* opCtx, diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h index b1888d46f76..46c4f5e6e8b 100644 --- a/src/mongo/db/index/index_build_interceptor.h +++ b/src/mongo/db/index/index_build_interceptor.h @@ -31,13 +31,11 @@ #include -#include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/duplicate_key_tracker.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/skipped_record_tracker.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/storage/column_store.h" #include "mongo/db/storage/temporary_record_store.h" #include "mongo/db/yieldable.h" #include "mongo/platform/atomic_word.h" @@ -55,7 +53,7 @@ public: */ enum class DrainYieldPolicy { kNoYield, kYield }; - enum class Op { kInsert, kDelete, kUpdate }; + enum class Op { kInsert, kDelete }; /** * Indicates whether to record duplicate keys that have been inserted into the index. When set @@ -102,18 +100,6 @@ public: Op op, int64_t* numKeysOut); - /** - * Client writes that are concurrent with a column store index build will have their index - * updates written to a temporary table. 
After the index table scan is complete, these updates - * will be applied to the underlying index table. - * - * On success, `numKeysOut` if non-null will contain the number of keys added or removed. - */ - Status sideWrite(OperationContext* opCtx, - const PathCellSet& columnstoreKeys, - Op op, - int64_t* numKeysOut); - /** * Given a duplicate key, record the key for later verification by a call to * checkDuplicateKeyConstraints(); @@ -187,6 +173,7 @@ public: private: using SideWriteRecord = std::pair; + Status _applyWrite(OperationContext* opCtx, const CollectionPtr& coll, const BSONObj& doc, @@ -206,8 +193,6 @@ private: FailPoint* fp, long long iteration) const; - Status _finishSideWrite(OperationContext* opCtx, const std::vector& toInsert); - // The entry for the index that is being built. const IndexCatalogEntry* _indexCatalogEntry; @@ -239,4 +224,5 @@ private: MONGO_MAKE_LATCH("IndexBuildInterceptor::_multikeyPathMutex"); boost::optional _multikeyPaths; }; + } // namespace mongo diff --git a/src/mongo/db/index/index_build_interceptor_test.cpp b/src/mongo/db/index/index_build_interceptor_test.cpp index e0f9f86c98a..30f996d69d2 100644 --- a/src/mongo/db/index/index_build_interceptor_test.cpp +++ b/src/mongo/db/index/index_build_interceptor_test.cpp @@ -30,7 +30,6 @@ #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/index/index_build_interceptor.h" -#include "mongo/idl/server_parameter_test_util.h" namespace mongo { namespace { @@ -119,238 +118,5 @@ TEST_F(IndexBuilderInterceptorTest, SingleInsertIsSavedToSideWritesTable) { << "key" << serializedKeyString), sideWrites[0]); } - - -TEST_F(IndexBuilderInterceptorTest, SingleColumnInsertIsSavedToSideWritesTable) { - RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); - auto interceptor = createIndexBuildInterceptor( - fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); - - PathCellSet columnKeys; - 
columnKeys.emplace_back(std::make_tuple("path", "cell", RecordId(1))); - - WriteUnitOfWork wuow(operationContext()); - int64_t numKeys = 0; - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys, IndexBuildInterceptor::Op::kInsert, &numKeys)); - ASSERT_EQ(1, numKeys); - wuow.commit(); - - BSONObjBuilder builder; - RecordId(1).serializeToken("rid", &builder); - BSONObj obj = builder.obj(); - BSONElement elem = obj["rid"]; - - auto sideWrites = getSideWritesTableContents(std::move(interceptor)); - ASSERT_EQ(1, sideWrites.size()); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem << "op" - << "i" - << "path" - << "path" - << "cell" - << "cell"), - sideWrites[0]); -} - -TEST_F(IndexBuilderInterceptorTest, SingleColumnDeleteIsSavedToSideWritesTable) { - RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); - auto interceptor = createIndexBuildInterceptor( - fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); - - PathCellSet columnKeys; - columnKeys.emplace_back(std::make_tuple("path", "", RecordId(1))); - - WriteUnitOfWork wuow(operationContext()); - int64_t numKeys = 0; - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys, IndexBuildInterceptor::Op::kDelete, &numKeys)); - ASSERT_EQ(1, numKeys); - wuow.commit(); - - BSONObjBuilder builder; - RecordId(1).serializeToken("rid", &builder); - BSONObj obj = builder.obj(); - BSONElement elem = obj["rid"]; - - auto sideWrites = getSideWritesTableContents(std::move(interceptor)); - ASSERT_EQ(1, sideWrites.size()); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem << "op" - << "d" - << "path" - << "path" - << "cell" - << ""), - sideWrites[0]); -} - -TEST_F(IndexBuilderInterceptorTest, SingleColumnUpdateIsSavedToSideWritesTable) { - RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); - auto interceptor = createIndexBuildInterceptor( - fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); - - // create path + cell + rid 
- PathCellSet columnKeys; - columnKeys.emplace_back(std::make_tuple("path", "cell", RecordId(1))); - - WriteUnitOfWork wuow(operationContext()); - int64_t numKeys = 0; - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys, IndexBuildInterceptor::Op::kUpdate, &numKeys)); - ASSERT_EQ(1, numKeys); - wuow.commit(); - - BSONObjBuilder builder; - RecordId(1).serializeToken("rid", &builder); - BSONObj obj = builder.obj(); - BSONElement elem = obj["rid"]; - - auto sideWrites = getSideWritesTableContents(std::move(interceptor)); - ASSERT_EQ(1, sideWrites.size()); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem << "op" - << "u" - << "path" - << "path" - << "cell" - << "cell"), - sideWrites[0]); -} - -TEST_F(IndexBuilderInterceptorTest, MultipleColumnInsertsAreSavedToSideWritesTable) { - RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); - auto interceptor = createIndexBuildInterceptor( - fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); - - PathCellSet columnKeys; - columnKeys.emplace_back(std::make_tuple("path", "cell", RecordId(1))); - columnKeys.emplace_back(std::make_tuple("path1", "cell1", RecordId(1))); - columnKeys.emplace_back(std::make_tuple("path2", "cell2", RecordId(2))); - columnKeys.emplace_back(std::make_tuple("path3", "cell3", RecordId(2))); - - WriteUnitOfWork wuow(operationContext()); - int64_t numKeys = 0; - - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys, IndexBuildInterceptor::Op::kInsert, &numKeys)); - ASSERT_EQ(4, numKeys); - wuow.commit(); - - BSONObjBuilder builder; - RecordId(1).serializeToken("rid", &builder); - BSONObj obj = builder.obj(); - BSONElement elem1 = obj["rid"]; - - BSONObjBuilder builder2; - RecordId(2).serializeToken("rid", &builder2); - BSONObj obj2 = builder2.obj(); - BSONElement elem2 = obj2["rid"]; - - auto sideWrites = getSideWritesTableContents(std::move(interceptor)); - ASSERT_EQ(4, sideWrites.size()); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem1 << 
"op" - << "i" - << "path" - << "path" - << "cell" - << "cell"), - sideWrites[0]); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem1 << "op" - << "i" - << "path" - << "path1" - << "cell" - << "cell1"), - sideWrites[1]); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem2 << "op" - << "i" - << "path" - << "path2" - << "cell" - << "cell2"), - sideWrites[2]); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem2 << "op" - << "i" - << "path" - << "path3" - << "cell" - << "cell3"), - sideWrites[3]); -} - -TEST_F(IndexBuilderInterceptorTest, MultipleColumnSideWritesAreSavedToSideWritesTable) { - RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); - auto interceptor = createIndexBuildInterceptor( - fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); - - WriteUnitOfWork wuow(operationContext()); - int64_t numKeys = 0; - - PathCellSet columnKeys; - columnKeys.emplace_back(std::make_tuple("path", "cell", RecordId(1))); - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys, IndexBuildInterceptor::Op::kInsert, &numKeys)); - ASSERT_EQ(1, numKeys); - - PathCellSet columnKeys2; - columnKeys2.emplace_back(std::make_tuple("path", "", RecordId(1))); - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys2, IndexBuildInterceptor::Op::kDelete, &numKeys)); - ASSERT_EQ(1, numKeys); - - - PathCellSet columnKeys3; - columnKeys3.emplace_back(std::make_tuple("path1", "cell1", RecordId(2))); - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys3, IndexBuildInterceptor::Op::kUpdate, &numKeys)); - ASSERT_EQ(1, numKeys); - - PathCellSet columnKeys4; - columnKeys4.emplace_back(std::make_tuple("path2", "cell2", RecordId(2))); - ASSERT_OK(interceptor->sideWrite( - operationContext(), columnKeys4, IndexBuildInterceptor::Op::kInsert, &numKeys)); - ASSERT_EQ(1, numKeys); - wuow.commit(); - - BSONObjBuilder builder; - RecordId(1).serializeToken("rid", &builder); - BSONObj obj = builder.obj(); - BSONElement elem1 = obj["rid"]; - - 
BSONObjBuilder builder2; - RecordId(2).serializeToken("rid", &builder2); - BSONObj obj2 = builder2.obj(); - BSONElement elem2 = obj2["rid"]; - - auto sideWrites = getSideWritesTableContents(std::move(interceptor)); - ASSERT_EQ(4, sideWrites.size()); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem1 << "op" - << "i" - << "path" - << "path" - << "cell" - << "cell"), - sideWrites[0]); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem1 << "op" - << "d" - << "path" - << "path" - << "cell" - << ""), - sideWrites[1]); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem2 << "op" - << "u" - << "path" - << "path1" - << "cell" - << "cell1"), - sideWrites[2]); - ASSERT_BSONOBJ_EQ(BSON("rid" << elem2 << "op" - << "i" - << "path" - << "path2" - << "cell" - << "cell2"), - sideWrites[3]); -} - } // namespace } // namespace mongo diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index 6e17ed3789d..440cea078b1 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -2407,8 +2407,9 @@ void IndexBuildsCoordinator::_runIndexBuildInner( // * Explicitly abort the index build with abortIndexBuildByBuildUUID() before performing an // operation that causes the index build to throw an error. // TODO (SERVER-69264): Remove ErrorCodes::CannotCreateIndex. - if (opCtx->checkForInterruptNoAssert().isOK() && - status.code() != ErrorCodes::CannotCreateIndex) { + // TODO (SERVER-69496): Remove ErrorCodes::InterruptedAtShutdown. 
+ if (!opCtx->isKillPending() && status.code() != ErrorCodes::CannotCreateIndex && + status.code() != ErrorCodes::InterruptedAtShutdown) { if (TestingProctor::instance().isEnabled()) { LOGV2_FATAL( 6967700, "Unexpected error code during index build cleanup", "error"_attr = status); diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp index b5b64570504..a120bfc3082 100644 --- a/src/mongo/db/namespace_string.cpp +++ b/src/mongo/db/namespace_string.cpp @@ -192,9 +192,6 @@ const NamespaceString NamespaceString::kSetChangeStreamStateCoordinatorNamespace const NamespaceString NamespaceString::kGlobalIndexClonerNamespace( NamespaceString::kConfigDb, "localGlobalIndexOperations.cloner"); -const NamespaceString NamespaceString::kConfigQueryAnalyzersNamespace(NamespaceString::kConfigDb, - "queryAnalyzers"); - NamespaceString NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(StringData ns) { if (!gMultitenancySupport) { return NamespaceString(ns, boost::none); @@ -224,6 +221,20 @@ bool NamespaceString::isLegalClientSystemNS( const ServerGlobalParams::FeatureCompatibility& currentFCV) const { auto dbname = dbName().db(); + NamespaceString parsedNSS; + if (gMultitenancySupport && !tenantId()) { + // TODO (SERVER-67423) Remove support for mangled dbname in isLegalClientSystemNS check + // Transitional support for accepting tenantId as a mangled database name. + try { + parsedNSS = parseFromStringExpectTenantIdInMultitenancyMode(ns()); + if (parsedNSS.tenantId()) { + dbname = parsedNSS.dbName().db(); + } + } catch (const DBException&) { + // Swallow exception. + } + } + if (dbname == kAdminDb) { if (coll() == "system.roles") return true; diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h index 98928cb6a35..31689947d41 100644 --- a/src/mongo/db/namespace_string.h +++ b/src/mongo/db/namespace_string.h @@ -267,9 +267,6 @@ public: // Namespace used for storing global index cloner state documents. 
static const NamespaceString kGlobalIndexClonerNamespace; - // Namespace used for storing query analyzer settings. - static const NamespaceString kConfigQueryAnalyzersNamespace; - /** * Constructs an empty NamespaceString. */ diff --git a/src/mongo/db/namespace_string_test.cpp b/src/mongo/db/namespace_string_test.cpp index 736f024d3af..72e3ee770a6 100644 --- a/src/mongo/db/namespace_string_test.cpp +++ b/src/mongo/db/namespace_string_test.cpp @@ -358,7 +358,7 @@ TEST(NamespaceStringTest, NSSNoCollectionWithTenantId) { } TEST(NamespaceStringTest, ParseNSSWithTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); TenantId tenantId(OID::gen()); std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo.bar"; diff --git a/src/mongo/db/op_observer/SConscript b/src/mongo/db/op_observer/SConscript index 804864cf510..d1faee83d60 100644 --- a/src/mongo/db/op_observer/SConscript +++ b/src/mongo/db/op_observer/SConscript @@ -121,43 +121,3 @@ env.Library( '$BUILD_DIR/mongo/db/s/user_writes_recoverable_critical_section', ], ) - -env.CppUnitTest( - target='db_op_observer_test', - source=[ - 'op_observer_impl_test.cpp', - 'op_observer_registry_test.cpp', - 'user_write_block_mode_op_observer_test.cpp', - ], - LIBDEPS=[ - '$BUILD_DIR/mongo/db/auth/authmocks', - '$BUILD_DIR/mongo/db/batched_write_context', - '$BUILD_DIR/mongo/db/catalog/catalog_helpers', - '$BUILD_DIR/mongo/db/catalog/collection', - '$BUILD_DIR/mongo/db/catalog/import_collection_oplog_entry', - '$BUILD_DIR/mongo/db/catalog/local_oplog_info', - '$BUILD_DIR/mongo/db/commands/create_command', - '$BUILD_DIR/mongo/db/concurrency/exception_util', - '$BUILD_DIR/mongo/db/dbdirectclient', - '$BUILD_DIR/mongo/db/read_write_concern_defaults', - '$BUILD_DIR/mongo/db/read_write_concern_defaults_mock', - '$BUILD_DIR/mongo/db/repl/image_collection_entry', - 
'$BUILD_DIR/mongo/db/repl/oplog', - '$BUILD_DIR/mongo/db/repl/oplog_interface_local', - '$BUILD_DIR/mongo/db/repl/replmocks', - '$BUILD_DIR/mongo/db/repl/storage_interface_impl', - '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', - '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', - '$BUILD_DIR/mongo/db/session/session_catalog', - '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/shard_role', - '$BUILD_DIR/mongo/db/storage/recovery_unit_base', - '$BUILD_DIR/mongo/db/transaction/transaction', - '$BUILD_DIR/mongo/db/write_block_bypass', - 'op_observer_impl', - 'op_observer_util', - 'oplog_writer_impl', - 'user_write_block_mode_op_observer', - ], -) diff --git a/src/mongo/db/op_observer/fcv_op_observer.h b/src/mongo/db/op_observer/fcv_op_observer.h index c7641fd6d9a..9cce64d85f4 100644 --- a/src/mongo/db/op_observer/fcv_op_observer.h +++ b/src/mongo/db/op_observer/fcv_op_observer.h @@ -86,8 +86,7 @@ public: BSONObj indexDoc) final {} void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -211,6 +210,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/op_observer/op_observer.h b/src/mongo/db/op_observer/op_observer.h index f8bfd52e235..c422cb84950 100644 --- a/src/mongo/db/op_observer/op_observer.h +++ b/src/mongo/db/op_observer/op_observer.h @@ -133,8 +133,7 @@ public: virtual void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) = 0; + const UUID& globalIndexUUID) = 0; virtual void 
onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -487,6 +486,9 @@ public: * The 'reservedSlots' is a list of oplog slots reserved for the oplog entries in a transaction. * The last reserved slot represents the prepareOpTime used for the prepare oplog entry. * + * The 'numberOfPrePostImagesToWrite' is the number of CRUD operations that have a pre-image + * to write as a noop oplog entry. + * * The 'wallClockTime' is the time to record as wall clock time on oplog entries resulting from * transaction preparation. * @@ -496,6 +498,7 @@ public: virtual std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) = 0; diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp index d8f128e3c0a..2a9a08e8195 100644 --- a/src/mongo/db/op_observer/op_observer_impl.cpp +++ b/src/mongo/db/op_observer/op_observer_impl.cpp @@ -304,7 +304,6 @@ void logGlobalIndexDDLOperation(OperationContext* opCtx, const NamespaceString& globalIndexNss, const UUID& globalIndexUUID, const StringData commandString, - boost::optional numKeys, OplogWriter* oplogWriter) { invariant(!opCtx->inMultiDocumentTransaction()); @@ -315,16 +314,6 @@ void logGlobalIndexDDLOperation(OperationContext* opCtx, MutableOplogEntry oplogEntry; oplogEntry.setOpType(repl::OpTypeEnum::kCommand); oplogEntry.setObject(builder.done()); - - // On global index drops, persist the number of records into the 'o2' field similar to a - // collection drop. This allows for efficiently restoring the index keys count after rollback - // without forcing a collection scan. 
- invariant((numKeys && commandString == "dropGlobalIndex") || - (!numKeys && commandString == "createGlobalIndex")); - if (numKeys) { - oplogEntry.setObject2(makeObject2ForDropOrRename(*numKeys)); - } - // The 'ns' field is technically redundant as it can be derived from the uuid, however it's a // required oplog entry field. oplogEntry.setNss(globalIndexNss.getCommandNS()); @@ -358,21 +347,16 @@ void OpObserverImpl::onCreateGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, const UUID& globalIndexUUID) { constexpr StringData commandString = "createGlobalIndex"_sd; - logGlobalIndexDDLOperation(opCtx, - globalIndexNss, - globalIndexUUID, - commandString, - boost::none /* numKeys */, - _oplogWriter.get()); + logGlobalIndexDDLOperation( + opCtx, globalIndexNss, globalIndexUUID, commandString, _oplogWriter.get()); } void OpObserverImpl::onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) { + const UUID& globalIndexUUID) { constexpr StringData commandString = "dropGlobalIndex"_sd; logGlobalIndexDDLOperation( - opCtx, globalIndexNss, globalIndexUUID, commandString, numKeys, _oplogWriter.get()); + opCtx, globalIndexNss, globalIndexUUID, commandString, _oplogWriter.get()); } void OpObserverImpl::onCreateIndex(OperationContext* opCtx, @@ -2174,6 +2158,7 @@ void OpObserverImpl::onPreparedTransactionCommit( std::unique_ptr OpObserverImpl::preTransactionPrepare(OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) { auto applyOpsOplogSlotAndOperationAssignment = diff --git a/src/mongo/db/op_observer/op_observer_impl.h b/src/mongo/db/op_observer/op_observer_impl.h index bb704238a01..63fe23e0caa 100644 --- a/src/mongo/db/op_observer/op_observer_impl.h +++ b/src/mongo/db/op_observer/op_observer_impl.h @@ -64,8 +64,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const 
NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final; + const UUID& globalIndexUUID) final; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -229,6 +228,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final; diff --git a/src/mongo/db/op_observer/op_observer_impl_test.cpp b/src/mongo/db/op_observer/op_observer_impl_test.cpp index 4b24b08bc70..bb54d4bc07a 100644 --- a/src/mongo/db/op_observer/op_observer_impl_test.cpp +++ b/src/mongo/db/op_observer/op_observer_impl_test.cpp @@ -618,7 +618,7 @@ TEST_F(OpObserverTest, OnDropCollectionReturnsDropOpTime) { } TEST_F(OpObserverTest, OnDropCollectionInlcudesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); OpObserverImpl opObserver(std::make_unique()); auto opCtx = cc().makeOperationContext(); @@ -679,7 +679,7 @@ TEST_F(OpObserverTest, OnRenameCollectionReturnsRenameOpTime) { } TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOff) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", false); OpObserverImpl opObserver(std::make_unique()); auto opCtx = cc().makeOperationContext(); @@ -716,7 +716,7 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOff) { } TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOn) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + 
RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); OpObserverImpl opObserver(std::make_unique()); auto opCtx = cc().makeOperationContext(); @@ -834,7 +834,7 @@ TEST_F(OpObserverTest, ImportCollectionOplogEntry) { } TEST_F(OpObserverTest, ImportCollectionOplogEntryIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); OpObserverImpl opObserver(std::make_unique()); auto opCtx = cc().makeOperationContext(); @@ -879,7 +879,7 @@ TEST_F(OpObserverTest, ImportCollectionOplogEntryIncludesTenantId) { } TEST_F(OpObserverTest, SingleStatementInsertTestIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); std::vector insert; @@ -906,7 +906,7 @@ TEST_F(OpObserverTest, SingleStatementInsertTestIncludesTenantId) { } TEST_F(OpObserverTest, SingleStatementUpdateTestIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); CollectionUpdateArgs updateArgs; @@ -936,7 +936,7 @@ TEST_F(OpObserverTest, SingleStatementUpdateTestIncludesTenantId) { } TEST_F(OpObserverTest, SingleStatementDeleteTestIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + 
RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); auto opCtx = cc().makeOperationContext(); @@ -1145,8 +1145,8 @@ protected: size_t numberOfPrePostImagesToWrite = 0) { auto txnOps = txnParticipant().retrieveCompletedTransactionOperations(opCtx()); auto currentTime = Date_t::now(); - auto applyOpsAssignment = - opObserver().preTransactionPrepare(opCtx(), reservedSlots, currentTime, txnOps); + auto applyOpsAssignment = opObserver().preTransactionPrepare( + opCtx(), reservedSlots, numberOfPrePostImagesToWrite, currentTime, txnOps); opCtx()->recoveryUnit()->setPrepareTimestamp(prepareOpTime.getTimestamp()); opObserver().onTransactionPrepare(opCtx(), reservedSlots, @@ -1708,7 +1708,7 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTest) { } TEST_F(OpObserverTransactionTest, TransactionalInsertTestIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); auto txnParticipant = TransactionParticipant::get(opCtx()); @@ -1832,7 +1832,7 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTest) { } TEST_F(OpObserverTransactionTest, TransactionalUpdateTestIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); auto txnParticipant = TransactionParticipant::get(opCtx()); @@ -1932,7 +1932,7 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTest) { } TEST_F(OpObserverTransactionTest, TransactionalDeleteTestIncludesTenantId) { - 
RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); auto txnParticipant = TransactionParticipant::get(opCtx()); @@ -2858,7 +2858,7 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdate) { // Repeat the same test as above, but assert tenantId is included when available TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdateIncludesTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); // Setup. auto opCtxRaii = cc().makeOperationContext(); diff --git a/src/mongo/db/op_observer/op_observer_noop.h b/src/mongo/db/op_observer/op_observer_noop.h index 8bea94e5578..fca2d16727f 100644 --- a/src/mongo/db/op_observer/op_observer_noop.h +++ b/src/mongo/db/op_observer/op_observer_noop.h @@ -46,8 +46,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -196,6 +195,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override { return nullptr; diff --git a/src/mongo/db/op_observer/op_observer_registry.h b/src/mongo/db/op_observer/op_observer_registry.h index 0ce554bfb9a..4848466a403 100644 --- a/src/mongo/db/op_observer/op_observer_registry.h +++ b/src/mongo/db/op_observer/op_observer_registry.h @@ -76,11 +76,10 @@ public: void 
onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final { + const UUID& globalIndexUUID) final { ReservedTimes times{opCtx}; for (auto& o : _observers) - o->onDropGlobalIndex(opCtx, globalIndexNss, globalIndexUUID, numKeys); + o->onDropGlobalIndex(opCtx, globalIndexNss, globalIndexUUID); }; void onCreateIndex(OperationContext* const opCtx, @@ -434,13 +433,14 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override { std::unique_ptr applyOpsOplogSlotAndOperationAssignment; for (auto&& observer : _observers) { - auto applyOpsAssignment = - observer->preTransactionPrepare(opCtx, reservedSlots, wallClockTime, statements); + auto applyOpsAssignment = observer->preTransactionPrepare( + opCtx, reservedSlots, numberOfPrePostImagesToWrite, wallClockTime, statements); tassert(6278501, "More than one OpObserver returned operation to \"applyOps\" assignment", !(applyOpsAssignment && applyOpsOplogSlotAndOperationAssignment)); diff --git a/src/mongo/db/op_observer/user_write_block_mode_op_observer.h b/src/mongo/db/op_observer/user_write_block_mode_op_observer.h index 40e39378e68..8eab9c9544b 100644 --- a/src/mongo/db/op_observer/user_write_block_mode_op_observer.h +++ b/src/mongo/db/op_observer/user_write_block_mode_op_observer.h @@ -173,8 +173,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; // Index builds committing can be left unchecked since we kill any active index builds before // enabling write blocking. 
This means any index build which gets to the commit phase while @@ -241,6 +240,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp index 0a38119164d..80ccc91c559 100644 --- a/src/mongo/db/ops/write_ops_exec.cpp +++ b/src/mongo/db/ops/write_ops_exec.cpp @@ -183,7 +183,7 @@ public: // here. No-op updates will not generate a new lastOp, so we still need the // guard to fire in that case. Operations on the local DB aren't replicated, so they // don't need to bump the lastOp. - replClientInfo().setLastOpToSystemLastOpTimeIgnoringCtxInterrupted(_opCtx); + replClientInfo().setLastOpToSystemLastOpTimeIgnoringShutdownCtxCancelled(_opCtx); LOGV2_DEBUG(20888, 5, "Set last op to system time: {timestamp}", diff --git a/src/mongo/db/pipeline/abt/abt_optimization_test.cpp b/src/mongo/db/pipeline/abt/abt_optimization_test.cpp index 5c640901c3b..2d74409e4ee 100644 --- a/src/mongo/db/pipeline/abt/abt_optimization_test.cpp +++ b/src/mongo/db/pipeline/abt/abt_optimization_test.cpp @@ -42,7 +42,6 @@ unittest::GoldenTestConfig goldenTestConfigABTOptimization{"src/mongo/db/test_ou TEST(ABTTranslate, OptimizePipelineTests) { unittest::GoldenTestContext gctx(&goldenTestConfigABTOptimization); - // TODO SERVER-70028: the $or and $in explains below should be equal after 70028 is complete. 
testABTTranslationAndOptimization( gctx, "optimized $match with $or: pipeline is able to use a SargableNode with a disjunction of " @@ -53,6 +52,7 @@ TEST(ABTTranslate, OptimizePipelineTests) { {{{"collection", ScanDefinition{{}, {{"index1", makeIndexDefinition("a", CollationOp::Ascending)}}}}}}); + // TODO SERVER-67819 Support indexing for eqMember op type testABTTranslationAndOptimization( gctx, "optimized $match with $in and a list of equalities becomes a comparison to an EqMember " diff --git a/src/mongo/db/pipeline/change_stream_event_transform_test.cpp b/src/mongo/db/pipeline/change_stream_event_transform_test.cpp index 16e09d38961..d5724544de3 100644 --- a/src/mongo/db/pipeline/change_stream_event_transform_test.cpp +++ b/src/mongo/db/pipeline/change_stream_event_transform_test.cpp @@ -169,7 +169,7 @@ TEST(ChangeStreamEventTransformTest, TestUpdateTransformWithTenantId) { // Turn on multitenancySupport, but not featureFlagRequireTenantId. We expect the tenantId to be // part of the 'ns' field in the oplog entry, but it should not be a part of the db name in the // change event. - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const auto documentKey = Document{{"x", 1}, {"y", 1}}; const auto tenantId = TenantId(OID::gen()); @@ -214,7 +214,7 @@ TEST(ChangeStreamEventTransformTest, TestRenameTransformWithTenantId) { // Turn on multitenancySupport, but not featureFlagRequireTenantId. We expect the tenantId to be // part of the 'ns' field in the oplog entry, but it should not be a part of the db name in the // change event. 
- RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const auto tenantId = TenantId(OID::gen()); NamespaceString renameFrom(tenantId, "unittests.serverless_change_stream"); @@ -273,7 +273,7 @@ TEST(ChangeStreamEventTransformTest, TestDropDatabaseTransformWithTenantId) { // Turn on multitenancySupport, but not featureFlagRequireTenantId. We expect the tenantId to be // part of the 'ns' field in the oplog entry, but it should not be a part of the db name in the // change event. - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const auto tenantId = TenantId(OID::gen()); NamespaceString dbToDrop(tenantId, "unittests"); @@ -311,7 +311,7 @@ TEST(ChangeStreamEventTransformTest, TestCreateTransformWithTenantId) { // Turn on multitenancySupport, but not featureFlagRequireTenantId. We expect the tenantId to be // part of the 'ns' field in the oplog entry, but it should not be a part of the db name in the // change event. - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const auto tenantId = TenantId(OID::gen()); NamespaceString nssWithTenant(tenantId, "unittests.serverless_change_stream"); @@ -350,7 +350,7 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewTransformWithTenantId) { // Turn on multitenancySupport, but not featureFlagRequireTenantId. We expect the tenantId to be // part of the 'ns' field in the oplog entry, but it should not be a part of the db name in the // change event. 
- RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const auto tenantId = TenantId(OID::gen()); diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h index a406d233c03..da281834566 100644 --- a/src/mongo/db/pipeline/expression.h +++ b/src/mongo/db/pipeline/expression.h @@ -3258,7 +3258,9 @@ public: static StatusWith apply(Value lhs, Value rhs); explicit ExpressionSubtract(ExpressionContext* const expCtx) - : ExpressionFixedArity(expCtx) {} + : ExpressionFixedArity(expCtx) { + expCtx->sbeCompatible = false; + } ExpressionSubtract(ExpressionContext* const expCtx, ExpressionVector&& children) : ExpressionFixedArity(expCtx, std::move(children)) {} diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h index e939c248a9d..1c0213d0132 100644 --- a/src/mongo/db/query/canonical_query.h +++ b/src/mongo/db/query/canonical_query.h @@ -245,14 +245,6 @@ public: _explain = explain; } - bool getForceGenerateRecordId() const { - return _forceGenerateRecordId; - } - - void setForceGenerateRecordId(bool value) { - _forceGenerateRecordId = value; - } - OperationContext* getOpCtx() const { tassert(6508300, "'CanonicalQuery' does not have an 'ExpressionContext'", _expCtx); return _expCtx->opCtx; @@ -318,11 +310,6 @@ private: // True if this query can be executed by the SBE. bool _sbeCompatible = false; - // True if this query must produce a RecordId output in addition to the BSON objects that - // constitute the result set of the query. Any generated query solution must not discard record - // ids, even if the optimizer detects that they are not going to be consumed downstream. - bool _forceGenerateRecordId = false; - // A map from assigned InputParamId's to parameterised MatchExpression's. 
std::vector _inputParamIdToExpressionMap; }; diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp index 52344ab35f8..348e196d7f9 100644 --- a/src/mongo/db/query/canonical_query_encoder.cpp +++ b/src/mongo/db/query/canonical_query_encoder.cpp @@ -1119,7 +1119,6 @@ std::string encodeSBE(const CanonicalQuery& cq) { bufBuilder.appendBuf(proj.objdata(), proj.objsize()); bufBuilder.appendStr(strBuilderEncoded, false /* includeEndingNull */); - bufBuilder.appendChar(cq.getForceGenerateRecordId() ? 1 : 0); encodeFindCommandRequest(cq.getFindCommandRequest(), &bufBuilder); diff --git a/src/mongo/db/query/ce/SConscript b/src/mongo/db/query/ce/SConscript index 37fdc253507..99b258b4e7f 100644 --- a/src/mongo/db/query/ce/SConscript +++ b/src/mongo/db/query/ce/SConscript @@ -117,16 +117,6 @@ env.CppUnitTest( ], ) -env.CppUnitTest( - target="ce_edge_cases_test", - source=[ - "ce_edge_cases_test.cpp", - ], - LIBDEPS=[ - 'ce_test_utils', - ], -) - env.CppUnitTest( target='stats_cache_loader_test', source=[ diff --git a/src/mongo/db/query/ce/ce_edge_cases_test.cpp b/src/mongo/db/query/ce/ce_edge_cases_test.cpp deleted file mode 100644 index 14cf86e17de..00000000000 --- a/src/mongo/db/query/ce/ce_edge_cases_test.cpp +++ /dev/null @@ -1,508 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/query/ce/array_histogram.h" -#include "mongo/db/query/ce/ce_test_utils.h" -#include "mongo/db/query/ce/histogram_estimation.h" -#include "mongo/db/query/sbe_stage_builder_helpers.h" -#include "mongo/unittest/unittest.h" - -namespace mongo::ce { -namespace { - -using namespace sbe; - -TEST(EstimatorTest, OneBucketIntHistogram) { - // Data set of 10 values, each with frequency 3, in the range (-inf, 100]. - // Example: { -100, -20, 0, 20, 50, 60, 70, 80, 90, 100}. - std::vector data{{100, 3.0, 27.0, 9.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(30.0, getTotals(hist).card); - - // Estimates with the bucket bound. - ASSERT_EQ(3.0, estimateIntValCard(hist, 100, EstimationType::kEqual)); - ASSERT_EQ(27.0, estimateIntValCard(hist, 100, EstimationType::kLess)); - ASSERT_EQ(30.0, estimateIntValCard(hist, 100, EstimationType::kLessOrEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 100, EstimationType::kGreater)); - ASSERT_EQ(3.0, estimateIntValCard(hist, 100, EstimationType::kGreaterOrEqual)); - - // Estimates with a value inside the bucket. 
- ASSERT_EQ(3.0, estimateIntValCard(hist, 10, EstimationType::kEqual)); - // No interpolation possible for estimates of inequalities in a single bucket. The estimates - // are based on the default cardinality of half bucket +/- the estimate of equality inside of - // the bucket. - ASSERT_EQ(10.5, estimateIntValCard(hist, 10, EstimationType::kLess)); - ASSERT_EQ(13.5, estimateIntValCard(hist, 10, EstimationType::kLessOrEqual)); - ASSERT_EQ(16.5, estimateIntValCard(hist, 10, EstimationType::kGreater)); - ASSERT_EQ(19.5, estimateIntValCard(hist, 10, EstimationType::kGreaterOrEqual)); - - // Estimates for a value larger than the last bucket bound. - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kEqual)); - ASSERT_EQ(30.0, estimateIntValCard(hist, 1000, EstimationType::kLess)); - ASSERT_EQ(30.0, estimateIntValCard(hist, 1000, EstimationType::kLessOrEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kGreater)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kGreaterOrEqual)); -} - -TEST(EstimatorTest, OneExclusiveBucketIntHistogram) { - // Data set of a single value. - // By exclusive bucket we mean a bucket with only boundary, that is the range frequency and NDV - // are zero. - std::vector data{{100, 2.0, 0.0, 0.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(2.0, getTotals(hist).card); - - // Estimates with the bucket boundary. 
- ASSERT_EQ(2.0, estimateIntValCard(hist, 100, EstimationType::kEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 100, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 100, EstimationType::kGreater)); - - ASSERT_EQ(0.0, estimateIntValCard(hist, 0, EstimationType::kEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 0, EstimationType::kLess)); - ASSERT_EQ(2.0, estimateIntValCard(hist, 0, EstimationType::kGreater)); - - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kEqual)); - ASSERT_EQ(2.0, estimateIntValCard(hist, 1000, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kGreater)); -} - -TEST(EstimatorTest, OneBucketTwoIntValuesHistogram) { - // Data set of two values, example {5, 100, 100}. - std::vector data{{100, 2.0, 1.0, 1.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(3.0, getTotals(hist).card); - - // Estimates with the bucket boundary. - ASSERT_EQ(2.0, estimateIntValCard(hist, 100, EstimationType::kEqual)); - ASSERT_EQ(1.0, estimateIntValCard(hist, 100, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 100, EstimationType::kGreater)); - - ASSERT_EQ(1.0, estimateIntValCard(hist, 10, EstimationType::kEqual)); - // Default estimate of half of the bucket's range frequency = 0.5. - ASSERT_EQ(0.5, estimateIntValCard(hist, 10, EstimationType::kLess)); - ASSERT_EQ(2.5, estimateIntValCard(hist, 10, EstimationType::kGreater)); - - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kEqual)); - ASSERT_EQ(3.0, estimateIntValCard(hist, 1000, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kGreater)); -} - -TEST(EstimatorTest, OneBucketTwoIntValuesHistogram2) { - // Similar to the above test with higher frequency for the second value. - // Example {5, 5, 5, 100, 100}. 
- std::vector data{{100, 2.0, 3.0, 1.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(5.0, getTotals(hist).card); - - // Estimates with the bucket boundary. - ASSERT_EQ(2.0, estimateIntValCard(hist, 100, EstimationType::kEqual)); - ASSERT_EQ(3.0, estimateIntValCard(hist, 100, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 100, EstimationType::kGreater)); - - ASSERT_EQ(3.0, estimateIntValCard(hist, 10, EstimationType::kEqual)); - // Default estimate of half of the bucket's range frequency = 1.5. - ASSERT_EQ(1.5, estimateIntValCard(hist, 10, EstimationType::kLess)); - ASSERT_EQ(3.5, estimateIntValCard(hist, 10, EstimationType::kGreater)); - - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kEqual)); - ASSERT_EQ(5.0, estimateIntValCard(hist, 1000, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 1000, EstimationType::kGreater)); -} - - -TEST(EstimatorTest, TwoBucketsIntHistogram) { - // Data set of 10 values in the range [1, 100]. - std::vector data{{1, 1.0, 0.0, 0.0}, {100, 3.0, 26.0, 8.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(30.0, getTotals(hist).card); - - // Estimates for a value smaller than the first bucket. - ASSERT_EQ(0.0, estimateIntValCard(hist, -42, EstimationType::kEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, -42, EstimationType::kLess)); - ASSERT_EQ(0.0, estimateIntValCard(hist, -42, EstimationType::kLessOrEqual)); - ASSERT_EQ(30.0, estimateIntValCard(hist, -42, EstimationType::kGreater)); - ASSERT_EQ(30.0, estimateIntValCard(hist, -42, EstimationType::kGreaterOrEqual)); - - // Estimates with bucket bounds. 
- ASSERT_EQ(1.0, estimateIntValCard(hist, 1, EstimationType::kEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 1, EstimationType::kLess)); - ASSERT_EQ(1.0, estimateIntValCard(hist, 1, EstimationType::kLessOrEqual)); - ASSERT_EQ(29.0, estimateIntValCard(hist, 1, EstimationType::kGreater)); - ASSERT_EQ(30.0, estimateIntValCard(hist, 1, EstimationType::kGreaterOrEqual)); - - ASSERT_EQ(3.0, estimateIntValCard(hist, 100, EstimationType::kEqual)); - ASSERT_EQ(27.0, estimateIntValCard(hist, 100, EstimationType::kLess)); - ASSERT_EQ(30.0, estimateIntValCard(hist, 100, EstimationType::kLessOrEqual)); - ASSERT_EQ(0.0, estimateIntValCard(hist, 100, EstimationType::kGreater)); - ASSERT_EQ(3.0, estimateIntValCard(hist, 100, EstimationType::kGreaterOrEqual)); - - // Estimates with a value inside the bucket. The estimates use interpolation. - // The bucket ratio for the value of 10 is smaller than the estimate for equality - // and the estimates for Less and LessOrEqual are the same. - ASSERT_APPROX_EQUAL(3.3, estimateIntValCard(hist, 10, EstimationType::kEqual), 0.1); - ASSERT_APPROX_EQUAL(3.4, estimateIntValCard(hist, 10, EstimationType::kLess), 0.1); - ASSERT_APPROX_EQUAL(3.4, estimateIntValCard(hist, 10, EstimationType::kLessOrEqual), 0.1); - ASSERT_APPROX_EQUAL(26.6, estimateIntValCard(hist, 10, EstimationType::kGreater), 0.1); - ASSERT_APPROX_EQUAL(26.6, estimateIntValCard(hist, 10, EstimationType::kGreaterOrEqual), 0.1); - - // Different estimates for Less and LessOrEqual for the value of 50. 
- ASSERT_APPROX_EQUAL(3.3, estimateIntValCard(hist, 50, EstimationType::kEqual), 0.1); - ASSERT_APPROX_EQUAL(10.6, estimateIntValCard(hist, 50, EstimationType::kLess), 0.1); - ASSERT_APPROX_EQUAL(13.9, estimateIntValCard(hist, 50, EstimationType::kLessOrEqual), 0.1); - ASSERT_APPROX_EQUAL(16.1, estimateIntValCard(hist, 50, EstimationType::kGreater), 0.1); - ASSERT_APPROX_EQUAL(19.4, estimateIntValCard(hist, 50, EstimationType::kGreaterOrEqual), 0.1); -} - -TEST(EstimatorTest, ThreeExclusiveBucketsIntHistogram) { - std::vector data{{1, 1.0, 0.0, 0.0}, {10, 8.0, 0.0, 0.0}, {100, 1.0, 0.0, 0.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(10.0, getTotals(hist).card); - - ASSERT_EQ(0.0, estimateIntValCard(hist, 5, EstimationType::kEqual)); - ASSERT_EQ(1.0, estimateIntValCard(hist, 5, EstimationType::kLess)); - ASSERT_EQ(1.0, estimateIntValCard(hist, 5, EstimationType::kLessOrEqual)); - ASSERT_EQ(9.0, estimateIntValCard(hist, 5, EstimationType::kGreater)); - ASSERT_EQ(9.0, estimateIntValCard(hist, 5, EstimationType::kGreaterOrEqual)); -} -TEST(EstimatorTest, OneBucketStrHistogram) { - std::vector data{{"xyz", 3.0, 27.0, 9.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(30.0, getTotals(hist).card); - - // Estimates with bucket bound. - auto [tag, value] = value::makeNewString("xyz"_sd); - value::ValueGuard vg(tag, value); - double expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(3.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(27.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_EQ(30.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_EQ(3.0, expectedCard); - - // Estimates for a value inside the bucket. 
Since there is no low value bound in the histogram - // all values smaller than the upper bound will be estimated the same way using half of the - // bucket cardinality. - std::tie(tag, value) = value::makeNewString("a"_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(3.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(10.5, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_EQ(13.5, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(16.5, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_EQ(19.5, expectedCard); - - std::tie(tag, value) = value::makeNewString(""_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(3.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(10.5, expectedCard); - // Can we do better? Figure out that the query value is the smallest in its data type. - - // Estimates for a value larger than the upper bound. - std::tie(tag, value) = value::makeNewString("z"_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(30.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); -} - -TEST(EstimatorTest, TwoBucketsStrHistogram) { - // Data set of 100 strings in the range ["abc", "xyz"], with average frequency of 2. - std::vector data{{"abc", 2.0, 0.0, 0.0}, {"xyz", 3.0, 95.0, 48.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(100.0, getTotals(hist).card); - - // Estimates for a value smaller than the first bucket bound. 
- auto [tag, value] = value::makeNewString("a"_sd); - value::ValueGuard vg(tag, value); - - double expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(100.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_EQ(100.0, expectedCard); - - // Estimates with bucket bounds. - std::tie(tag, value) = value::makeNewString("abc"_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(2.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_EQ(2.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(98.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_EQ(100.0, expectedCard); - - std::tie(tag, value) = value::makeNewString("xyz"_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(3.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(97.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_EQ(100.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_EQ(3.0, expectedCard); - - // Estimates for a value inside the bucket. 
- std::tie(tag, value) = value::makeNewString("sun"_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_APPROX_EQUAL(2.0, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_APPROX_EQUAL(74.4, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_APPROX_EQUAL(76.4, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_APPROX_EQUAL(23.6, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_APPROX_EQUAL(25.6, expectedCard, 0.1); - - // Estimate for a value very close to the bucket bound. - std::tie(tag, value) = value::makeNewString("xyw"_sd); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_APPROX_EQUAL(2.0, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_APPROX_EQUAL(95.0, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kLessOrEqual).card; - ASSERT_APPROX_EQUAL(97.0, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_APPROX_EQUAL(3.0, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kGreaterOrEqual).card; - ASSERT_APPROX_EQUAL(5.0, expectedCard, 0.1); -} - -TEST(EstimatorTest, TwoBucketsDateHistogram) { - // June 6, 2017 -- June 7, 2017. 
- const int64_t startInstant = 1496777923000LL; - const int64_t endInstant = 1496864323000LL; - const auto startDate = Date_t::fromMillisSinceEpoch(startInstant); - const auto endDate = Date_t::fromMillisSinceEpoch(endInstant); - - std::vector data{{Value(startDate), 3.0, 0.0, 0.0}, - {Value(endDate), 1.0, 96.0, 48.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(100.0, getTotals(hist).card); - - const auto valueBefore = value::bitcastFrom(startInstant - 1); - double expectedCard = - estimate(hist, value::TypeTags::Date, valueBefore, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueBefore, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Date, valueBefore, EstimationType::kGreater).card; - ASSERT_EQ(100.0, expectedCard); - - const auto valueStart = value::bitcastFrom(startInstant); - expectedCard = estimate(hist, value::TypeTags::Date, valueStart, EstimationType::kEqual).card; - ASSERT_EQ(3.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueStart, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueStart, EstimationType::kGreater).card; - ASSERT_EQ(97.0, expectedCard); - - const auto valueEnd = value::bitcastFrom(endInstant); - expectedCard = estimate(hist, value::TypeTags::Date, valueEnd, EstimationType::kEqual).card; - ASSERT_EQ(1.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueEnd, EstimationType::kLess).card; - ASSERT_EQ(99.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueEnd, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); - - const auto valueIn = value::bitcastFrom(startInstant + 43000000); - expectedCard = estimate(hist, value::TypeTags::Date, valueIn, EstimationType::kEqual).card; - ASSERT_EQ(2.0, expectedCard); - expectedCard = 
estimate(hist, value::TypeTags::Date, valueIn, EstimationType::kLess).card; - ASSERT_APPROX_EQUAL(48.8, expectedCard, 0.1); - expectedCard = estimate(hist, value::TypeTags::Date, valueIn, EstimationType::kGreater).card; - ASSERT_APPROX_EQUAL(49.2, expectedCard, 0.1); - - const auto valueAfter = value::bitcastFrom(endInstant + 100); - expectedCard = estimate(hist, value::TypeTags::Date, valueAfter, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueAfter, EstimationType::kLess).card; - ASSERT_EQ(100.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Date, valueAfter, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); -} - -TEST(EstimatorTest, TwoBucketsTimestampHistogram) { - // June 6, 2017 -- June 7, 2017 in seconds. - const int64_t startInstant = 1496777923LL; - const int64_t endInstant = 1496864323LL; - const Timestamp startTs{Seconds(startInstant), 0}; - const Timestamp endTs{Seconds(endInstant), 0}; - - std::vector data{{Value(startTs), 3.0, 0.0, 0.0}, {Value(endTs), 1.0, 96.0, 48.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(100.0, getTotals(hist).card); - - const auto valueBefore = value::bitcastFrom(startTs.asULL() - 1); - double expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueBefore, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueBefore, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueBefore, EstimationType::kGreater).card; - ASSERT_EQ(100.0, expectedCard); - - const auto valueStart = value::bitcastFrom( - startTs.asULL()); // NB: startTs.asInt64() produces different value. 
- expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueStart, EstimationType::kEqual).card; - ASSERT_EQ(3.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueStart, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueStart, EstimationType::kGreater).card; - ASSERT_EQ(97.0, expectedCard); - - const auto valueEnd = value::bitcastFrom(endTs.asULL()); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueEnd, EstimationType::kEqual).card; - ASSERT_EQ(1.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Timestamp, valueEnd, EstimationType::kLess).card; - ASSERT_EQ(99.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueEnd, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); - - const auto valueIn = value::bitcastFrom((startTs.asULL() + endTs.asULL()) / 2); - expectedCard = estimate(hist, value::TypeTags::Timestamp, valueIn, EstimationType::kEqual).card; - ASSERT_EQ(2.0, expectedCard); - expectedCard = estimate(hist, value::TypeTags::Timestamp, valueIn, EstimationType::kLess).card; - ASSERT_APPROX_EQUAL(49.0, expectedCard, 0.1); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueIn, EstimationType::kGreater).card; - ASSERT_APPROX_EQUAL(49.0, expectedCard, 0.1); - - const auto valueAfter = value::bitcastFrom(endTs.asULL() + 100); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueAfter, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueAfter, EstimationType::kLess).card; - ASSERT_EQ(100.0, expectedCard); - expectedCard = - estimate(hist, value::TypeTags::Timestamp, valueAfter, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); -} - -TEST(EstimatorTest, TwoBucketsObjectIdHistogram) { - const auto startOid = OID("63340d8d27afef2de7357e8d"); - const auto endOid 
= OID("63340dbed6cd8af737d4139a"); - ASSERT_TRUE(startOid < endOid); - - std::vector data{{Value(startOid), 2.0, 0.0, 0.0}, - {Value(endOid), 1.0, 97.0, 77.0}}; - const ScalarHistogram hist = createHistogram(data); - - ASSERT_EQ(100.0, getTotals(hist).card); - - auto [tag, value] = value::makeNewObjectId(); - value::ValueGuard vg(tag, value); - const auto oidBefore = OID("63340d8d27afef2de7357e8c"); - oidBefore.view().readInto(value::getObjectIdView(value)); - - double expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(100.0, expectedCard); - - // Bucket bounds. - startOid.view().readInto(value::getObjectIdView(value)); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(2.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(98.0, expectedCard); - - endOid.view().readInto(value::getObjectIdView(value)); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(1.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(99.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); - - // ObjectId value inside the bucket. 
- const auto oidInside = OID("63340db2cd4d46ff39178e9d"); - oidInside.view().readInto(value::getObjectIdView(value)); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_APPROX_EQUAL(1.2, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_APPROX_EQUAL(83.9, expectedCard, 0.1); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_APPROX_EQUAL(14.8, expectedCard, 0.1); - - const auto oidAfter = OID("63340dbed6cd8af737d4139b"); - oidAfter.view().readInto(value::getObjectIdView(value)); - expectedCard = estimate(hist, tag, value, EstimationType::kEqual).card; - ASSERT_EQ(0.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kLess).card; - ASSERT_EQ(100.0, expectedCard); - expectedCard = estimate(hist, tag, value, EstimationType::kGreater).card; - ASSERT_EQ(0.0, expectedCard); -} - -} // namespace -} // namespace mongo::ce diff --git a/src/mongo/db/query/ce/ce_histogram_test.cpp b/src/mongo/db/query/ce/ce_histogram_test.cpp index 6e38fd707b7..70c884c6e8a 100644 --- a/src/mongo/db/query/ce/ce_histogram_test.cpp +++ b/src/mongo/db/query/ce/ce_histogram_test.cpp @@ -335,11 +335,11 @@ TEST(CEHistogramTest, TestOneBoundIntRangeHistogram) { ASSERT_MATCH_CE(t, "{intRange: {$gt: 10}}", 46.0); ASSERT_MATCH_CE(t, "{intRange: {$gte: 15}}", 28.5); ASSERT_MATCH_CE(t, "{intRange: {$gt: 15}}", 23.5); - ASSERT_MATCH_CE(t, "{intRange: {$gte: 11}, intRange: {$lte: 20}}", 41.5); + ASSERT_MATCH_CE(t, "{intRange: {$gte: 11}, intRange: {$lte: 20}}", 46.5); ASSERT_MATCH_CE(t, "{intRange: {$gt: 11}, intRange: {$lte: 20}}", 41.5); // Test ranges that partially overlap with the entire histogram. 
- ASSERT_MATCH_CE(t, "{intRange: {$lt: 11}}", 9.5); + ASSERT_MATCH_CE(t, "{intRange: {$lt: 11}}", 4.5); ASSERT_MATCH_CE(t, "{intRange: {$lt: 15}}", 22.5); ASSERT_MATCH_CE(t, "{intRange: {$lte: 15}}", 27.5); ASSERT_MATCH_CE(t, "{intRange: {$gte: 8}, intRange: {$lte: 15}}", 27.5); @@ -386,7 +386,7 @@ TEST(CEHistogramTest, TestOneBoundIntRangeHistogram) { // node corresponding to the path "intRange", we have two keys and two ranges, both // corresponding to the same path. As a consequence, we combine the estimates for the intervals // using exponential backoff, which results in an overestimate. - ASSERT_MATCH_CE(t, "{intRange: {$gte: 11}, intRange: {$lt: 20}}", 41.09); + ASSERT_MATCH_CE(t, "{intRange: {$gte: 11}, intRange: {$lt: 20}}", 46.04); ASSERT_MATCH_CE(t, "{intRange: {$gt: 11}, intRange: {$lt: 20}}", 41.09); ASSERT_MATCH_CE(t, "{intRange: {$gt: 12}, intRange: {$lt: 15}}", 19.16); ASSERT_MATCH_CE(t, "{intRange: {$gte: 12}, intRange: {$lt: 15}}", 20.42); @@ -399,7 +399,7 @@ TEST(CEHistogramTest, TestOneBoundIntRangeHistogram) { t.setIndexes( {{"intRangeIndex", makeIndexDefinition("intRange", CollationOp::Ascending, /* isMultiKey */ false)}}); - ASSERT_MATCH_CE(t, "{intRange: {$gte: 11}, intRange: {$lt: 20}}", 40.5); + ASSERT_MATCH_CE(t, "{intRange: {$gte: 11}, intRange: {$lt: 20}}", 45.5); ASSERT_MATCH_CE(t, "{intRange: {$gt: 11}, intRange: {$lt: 20}}", 40.5); ASSERT_MATCH_CE(t, "{intRange: {$gt: 12}, intRange: {$lt: 15}}", 8.5); ASSERT_MATCH_CE(t, "{intRange: {$gte: 12}, intRange: {$lt: 15}}", 13.5); @@ -647,7 +647,7 @@ TEST(CEHistogramTest, TestArrayHistogramOnCompositePredicates) { ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$eq: 5}}, array: {$eq: 5}}", 35.0); // Test case with multiple predicates and ranges. 
- ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$lt: 5}}, mixed: {$lt: 5}}", 68.75); + ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$lt: 5}}, mixed: {$lt: 5}}", 67.88); ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$lt: 5}}, mixed: {$gt: 5}}", 28.19); // Test multiple $elemMatches. diff --git a/src/mongo/db/query/ce/ce_interpolation_test.cpp b/src/mongo/db/query/ce/ce_interpolation_test.cpp index 63c134cb131..043d9074b10 100644 --- a/src/mongo/db/query/ce/ce_interpolation_test.cpp +++ b/src/mongo/db/query/ce/ce_interpolation_test.cpp @@ -38,6 +38,11 @@ namespace { using namespace sbe; +double estimateIntValCard(const ScalarHistogram& hist, const int v, const EstimationType type) { + auto [tag, val] = std::make_pair(value::TypeTags::NumberInt64, value::bitcastFrom(v)); + return estimate(hist, tag, val, type).card; +}; + TEST(EstimatorTest, ManualHistogram) { std::vector data{{0, 1.0, 1.0, 1.0}, {10, 1.0, 10.0, 5.0}, @@ -476,7 +481,7 @@ TEST(EstimatorTest, UniformIntMixedArrayEstimate) { highTag, highVal, true /* includeScalar */); - ASSERT_APPROX_EQUAL(92.9, expectedCard, 0.1); // Actual: 94. + ASSERT_APPROX_EQUAL(90.9, expectedCard, 0.1); // Actual: 94. // Test interpolation for query: [{$match: {a: {$elemMatch: {$gt: 500, $lt: 550}}}}]. 
expectedCard = estimateCardRange(arrHist, diff --git a/src/mongo/db/query/ce/ce_test_utils.cpp b/src/mongo/db/query/ce/ce_test_utils.cpp index c4e76b2ef39..407b6ecd47c 100644 --- a/src/mongo/db/query/ce/ce_test_utils.cpp +++ b/src/mongo/db/query/ce/ce_test_utils.cpp @@ -142,6 +142,12 @@ optimizer::CEType CETester::getCE(ABT& abt) const { } ScalarHistogram createHistogram(const std::vector& data) { + sbe::value::Array array; + for (const auto& item : data) { + const auto [tag, val] = stage_builder::makeValue(item._v); + array.push_back(tag, val); + } + value::Array bounds; std::vector buckets; @@ -149,10 +155,10 @@ ScalarHistogram createHistogram(const std::vector& data) { double cumulativeNDV = 0.0; for (size_t i = 0; i < data.size(); i++) { - const auto& item = data.at(i); - const auto [tag, val] = stage_builder::makeValue(item._v); + const auto [tag, val] = array.getAt(i); bounds.push_back(tag, val); + const auto& item = data.at(i); cumulativeFreq += item._equalFreq + item._rangeFreq; cumulativeNDV += item._ndv + 1.0; buckets.emplace_back( @@ -161,11 +167,4 @@ ScalarHistogram createHistogram(const std::vector& data) { return {std::move(bounds), std::move(buckets)}; } - -double estimateIntValCard(const ScalarHistogram& hist, const int v, const EstimationType type) { - const auto [tag, val] = - std::make_pair(value::TypeTags::NumberInt64, value::bitcastFrom(v)); - return estimate(hist, tag, val, type).card; -}; - } // namespace mongo::ce diff --git a/src/mongo/db/query/ce/ce_test_utils.h b/src/mongo/db/query/ce/ce_test_utils.h index 9e6ebdeb5e8..667a3b70dc3 100644 --- a/src/mongo/db/query/ce/ce_test_utils.h +++ b/src/mongo/db/query/ce/ce_test_utils.h @@ -32,7 +32,6 @@ #include #include -#include "mongo/db/query/ce/histogram_estimation.h" #include "mongo/db/query/ce/scalar_histogram.h" #include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" @@ -72,15 +71,14 @@ const OptPhaseManager::PhaseSet kNoOptPhaseSet{}; 
* expecting. */ -#define _ASSERT_MATCH_CE(ce, predicate, expectedCE) \ - if constexpr (kCETestLogOnly) { \ - if (std::abs(ce.getCE(predicate) - expectedCE) > kMaxCEError) { \ - std::cout << "ERROR: cardinality " << ce.getCE(predicate) << " expected " \ - << expectedCE << std::endl; \ - } \ - ASSERT_APPROX_EQUAL(1.0, 1.0, kMaxCEError); \ - } else { \ - ASSERT_APPROX_EQUAL(expectedCE, ce.getCE(predicate), kMaxCEError); \ +#define _ASSERT_MATCH_CE(ce, predicate, expectedCE) \ + if constexpr (kCETestLogOnly) { \ + if (std::abs(ce.getCE(predicate) - expectedCE) > kMaxCEError) { \ + std::cout << "ERROR: expected " << expectedCE << std::endl; \ + } \ + ASSERT_APPROX_EQUAL(1.0, 1.0, kMaxCEError); \ + } else { \ + ASSERT_APPROX_EQUAL(expectedCE, ce.getCE(predicate), kMaxCEError); \ } #define _PREDICATE(field, predicate) (str::stream() << "{" << field << ": " << predicate "}") #define _ELEMMATCH_PREDICATE(field, predicate) \ @@ -161,7 +159,5 @@ struct BucketData { ScalarHistogram createHistogram(const std::vector& data); -double estimateIntValCard(const ScalarHistogram& hist, int v, EstimationType type); - } // namespace ce } // namespace mongo diff --git a/src/mongo/db/query/ce/histogram_estimation.cpp b/src/mongo/db/query/ce/histogram_estimation.cpp index 52851e5118f..fbd041380b4 100644 --- a/src/mongo/db/query/ce/histogram_estimation.cpp +++ b/src/mongo/db/query/ce/histogram_estimation.cpp @@ -95,16 +95,14 @@ EstimationResult interpolateEstimateInBucket(const ScalarHistogram& h, } } - const double bucketFreqRatio = bucket._rangeFreq * ratio; - resultCard += bucketFreqRatio; + resultCard += bucket._rangeFreq * ratio; resultNDV += bucket._ndv * ratio; if (type == EstimationType::kLess) { // Subtract from the estimate the cardinality and ndv corresponding to the equality - // operation, if they are larger than the ratio taken from this bucket. - const double innerEqFreqCorrection = (bucketFreqRatio < innerEqFreq) ? 0.0 : innerEqFreq; + // operation. 
const double innerEqNdv = (bucket._ndv * ratio <= 1.0) ? 0.0 : 1.0; - resultCard -= innerEqFreqCorrection; + resultCard -= innerEqFreq; resultNDV -= innerEqNdv; } return {resultCard, resultNDV}; diff --git a/src/mongo/db/query/ce/maxdiff_histogram_test.cpp b/src/mongo/db/query/ce/maxdiff_histogram_test.cpp index 2192b586237..78c6490a0e9 100644 --- a/src/mongo/db/query/ce/maxdiff_histogram_test.cpp +++ b/src/mongo/db/query/ce/maxdiff_histogram_test.cpp @@ -129,8 +129,9 @@ TEST_F(HistogramTest, MaxDiffTestInt) { ASSERT_LTE(hist.getBuckets().size(), nBuckets); const double estimatedCard = estimateCard(hist, 11, EstimationType::kLess); + ASSERT_EQ(36, actualCard); - ASSERT_APPROX_EQUAL(43.7333, estimatedCard, kTolerance); + ASSERT_APPROX_EQUAL(39.73333, estimatedCard, kTolerance); } TEST_F(HistogramTest, MaxDiffTestString) { @@ -158,7 +159,7 @@ TEST_F(HistogramTest, MaxDiffTestString) { const double estimatedCard = estimate(hist, tag, val, EstimationType::kLess).card; ASSERT_EQ(15, actualCard); - ASSERT_APPROX_EQUAL(15.9443, estimatedCard, kTolerance); + ASSERT_APPROX_EQUAL(10.9443, estimatedCard, kTolerance); } TEST_F(HistogramTest, MaxDiffTestMixedTypes) { diff --git a/src/mongo/db/query/ce/value_utils.cpp b/src/mongo/db/query/ce/value_utils.cpp index f4bfd0e8b53..9838f91a285 100644 --- a/src/mongo/db/query/ce/value_utils.cpp +++ b/src/mongo/db/query/ce/value_utils.cpp @@ -156,18 +156,6 @@ double valueToDouble(value::TypeTags tag, value::Value val) { const double charToDbl = ch / std::pow(2, i * 8); result += charToDbl; } - } else if (tag == value::TypeTags::Date || tag == value::TypeTags::Timestamp) { - int64_t v = value::bitcastTo(val); - result = value::numericCast(value::TypeTags::NumberInt64, v); - - } else if (tag == value::TypeTags::ObjectId) { - auto objView = - ConstDataView(reinterpret_cast(sbe::value::getObjectIdView(val)->data())); - // Take the first 8 bytes of the ObjectId. 
- // ToDo: consider using the entire ObjectId or other parts of it - // auto v = objView.read>(sizeof(uint32_t)); - auto v = objView.read>(); - result = value::numericCast(value::TypeTags::NumberInt64, v); } else { uassert(6844500, "Unexpected value type", false); } diff --git a/src/mongo/db/query/cqf_get_executor.cpp b/src/mongo/db/query/cqf_get_executor.cpp index f1843613b1d..f2e13faebbd 100644 --- a/src/mongo/db/query/cqf_get_executor.cpp +++ b/src/mongo/db/query/cqf_get_executor.cpp @@ -484,7 +484,9 @@ std::unique_ptr getSBEExecutorViaCascadesOp const CollectionPtr& collection, const boost::optional& indexHint, std::unique_ptr pipeline, - std::unique_ptr canonicalQuery) { + std::unique_ptr canonicalQuery, + const bool requireRID) { + // Ensure that either pipeline or canonicalQuery is set. tassert(624070, "getSBEExecutorViaCascadesOptimizer expects exactly one of the following to be set: " @@ -501,7 +503,6 @@ std::unique_ptr getSBEExecutorViaCascadesOp auto curOp = CurOp::get(opCtx); curOp->debug().cqfUsed = true; - const bool requireRID = canonicalQuery ? canonicalQuery->getForceGenerateRecordId() : false; const bool collectionExists = collection != nullptr; const std::string uuidStr = collectionExists ? collection->uuid().toString() : ""; const std::string collNameStr = nss.coll().toString(); @@ -623,7 +624,8 @@ std::unique_ptr getSBEExecutorViaCascadesOp } std::unique_ptr getSBEExecutorViaCascadesOptimizer( - const CollectionPtr& collection, std::unique_ptr query) { + const CollectionPtr& collection, std::unique_ptr query, const bool requireRID) { + boost::optional indexHint = query->getFindCommandRequest().getHint().isEmpty() ? 
boost::none : boost::make_optional(query->getFindCommandRequest().getHint()); @@ -633,8 +635,14 @@ std::unique_ptr getSBEExecutorViaCascadesOp auto expCtx = query->getExpCtx(); auto nss = query->nss(); - return getSBEExecutorViaCascadesOptimizer( - opCtx, expCtx, nss, collection, indexHint, nullptr /* pipeline */, std::move(query)); + return getSBEExecutorViaCascadesOptimizer(opCtx, + expCtx, + nss, + collection, + indexHint, + nullptr /* pipeline */, + std::move(query), + requireRID); } } // namespace mongo diff --git a/src/mongo/db/query/cqf_get_executor.h b/src/mongo/db/query/cqf_get_executor.h index 81454fad196..a2eb34081dc 100644 --- a/src/mongo/db/query/cqf_get_executor.h +++ b/src/mongo/db/query/cqf_get_executor.h @@ -49,12 +49,15 @@ std::unique_ptr getSBEExecutorViaCascadesOp const CollectionPtr& collection, const boost::optional& indexHint, std::unique_ptr pipeline, - std::unique_ptr = nullptr); + std::unique_ptr = nullptr, + bool requireRID = false); /** * Returns a PlanExecutor for the given CanonicalQuery. 
*/ std::unique_ptr getSBEExecutorViaCascadesOptimizer( - const CollectionPtr& collection, std::unique_ptr query); + const CollectionPtr& collection, + std::unique_ptr query, + bool requireRID = false); } // namespace mongo diff --git a/src/mongo/db/query/datetime/date_time_support.cpp b/src/mongo/db/query/datetime/date_time_support.cpp index fa18b4f1084..d11bd53f325 100644 --- a/src/mongo/db/query/datetime/date_time_support.cpp +++ b/src/mongo/db/query/datetime/date_time_support.cpp @@ -166,12 +166,6 @@ void TimeZoneDatabase::TimelibErrorContainerDeleter::operator()( timelib_error_container_dtor(errorContainer); } -void TimeZoneDatabase::TimelibTZInfoDeleter::operator()(timelib_tzinfo* tzInfo) { - if (tzInfo) { - timelib_tzinfo_dtor(tzInfo); - } -} - void TimeZoneDatabase::loadTimeZoneInfo( std::unique_ptr timeZoneDatabase) { invariant(timeZoneDatabase); @@ -199,8 +193,6 @@ void TimeZoneDatabase::loadTimeZoneInfo( _timeZones[entry.id] = TimeZone{nullptr}; timelib_tzinfo_dtor(tzInfo); } else { - _timeZoneInfos.emplace_back( - std::unique_ptr<_timelib_tzinfo, TimelibTZInfoDeleter>(tzInfo)); _timeZones[entry.id] = TimeZone{tzInfo}; } } @@ -403,7 +395,7 @@ std::vector TimeZoneDatabase::getTimeZoneStrings() const { void TimeZone::adjustTimeZone(timelib_time* timelibTime) const { if (isTimeZoneIDZone()) { - timelib_set_timezone(timelibTime, _tzInfo); + timelib_set_timezone(timelibTime, _tzInfo.get()); } else if (isUtcOffsetZone()) { timelib_set_timezone_from_offset(timelibTime, durationCount(_utcOffset)); } @@ -496,7 +488,14 @@ TimeZone::Iso8601DateParts::Iso8601DateParts(const timelib_time& timelib_time, D } -TimeZone::TimeZone(timelib_tzinfo* tzInfo) : _tzInfo(tzInfo), _utcOffset(0) {} +void TimeZone::TimelibTZInfoDeleter::operator()(timelib_tzinfo* tzInfo) { + if (tzInfo) { + timelib_tzinfo_dtor(tzInfo); + } +} + +TimeZone::TimeZone(timelib_tzinfo* tzInfo) + : _tzInfo(tzInfo, TimelibTZInfoDeleter()), _utcOffset(0) {} TimeZone::TimeZone(Seconds utcOffsetSeconds) : 
_tzInfo(nullptr), _utcOffset(utcOffsetSeconds) {} @@ -581,7 +580,7 @@ Seconds TimeZone::utcOffset(Date_t date) const { int32_t timezoneOffsetFromUTC = 0; int result = timelib_get_time_zone_offset_info(durationCount(date.toDurationSinceEpoch()), - _tzInfo, + _tzInfo.get(), &timezoneOffsetFromUTC, nullptr, nullptr); diff --git a/src/mongo/db/query/datetime/date_time_support.h b/src/mongo/db/query/datetime/date_time_support.h index 77cba17d131..8673f597e77 100644 --- a/src/mongo/db/query/datetime/date_time_support.h +++ b/src/mongo/db/query/datetime/date_time_support.h @@ -344,14 +344,10 @@ public: static void validateFromStringFormat(StringData format); std::unique_ptr<_timelib_time, TimelibTimeDeleter> getTimelibTime(Date_t) const; - _timelib_tzinfo* getTzInfo() const { + std::shared_ptr<_timelib_tzinfo> getTzInfo() const { return _tzInfo; } - Seconds getUtcOffset() const { - return _utcOffset; - } - private: /** * Only works with 1 <= spaces <= 4 and 0 <= number <= 9999. If spaces is less than the digit @@ -385,8 +381,12 @@ private: return Status::OK(); } + struct TimelibTZInfoDeleter { + void operator()(_timelib_tzinfo* tzInfo); + }; + // null if this TimeZone represents the default UTC time zone, or a UTC-offset time zone - _timelib_tzinfo* _tzInfo; + std::shared_ptr<_timelib_tzinfo> _tzInfo; // represents the UTC offset in seconds if _tzInfo is null and it is not 0 Seconds _utcOffset{0}; @@ -486,10 +486,6 @@ public: std::string toString() const; private: - struct TimelibTZInfoDeleter { - void operator()(_timelib_tzinfo* tzInfo); - }; - /** * Populates '_timeZones' with parsed time zone rules for each timezone specified by * 'timeZoneDatabase'. @@ -508,9 +504,6 @@ private: // The timelib structure which provides timezone information. std::unique_ptr<_timelib_tzdb, TimeZoneDBDeleter> _timeZoneDatabase; - - // The list of pre-load _timelib_tzinfo objects. 
- std::vector> _timeZoneInfos; }; /** diff --git a/src/mongo/db/query/fle/encrypted_predicate.cpp b/src/mongo/db/query/fle/encrypted_predicate.cpp index 74d2ef8fbf4..aeed43cc884 100644 --- a/src/mongo/db/query/fle/encrypted_predicate.cpp +++ b/src/mongo/db/query/fle/encrypted_predicate.cpp @@ -72,20 +72,5 @@ std::vector toValues(std::vector&& vec) { } return output; } - -std::unique_ptr makeTagDisjunction(ExpressionContext* expCtx, - std::vector&& tags) { - std::vector> orListElems; - for (auto&& tagElt : tags) { - // ... and for each tag, construct expression {$in: [tag, - // "$__safeContent__"]}. - std::vector> inVec{ - ExpressionConstant::create(expCtx, tagElt), - ExpressionFieldPath::createPathFromString( - expCtx, kSafeContent, expCtx->variablesParseState)}; - orListElems.push_back(make_intrusive(expCtx, std::move(inVec))); - } - return std::make_unique(expCtx, std::move(orListElems)); -} } // namespace fle } // namespace mongo diff --git a/src/mongo/db/query/fle/encrypted_predicate.h b/src/mongo/db/query/fle/encrypted_predicate.h index 5fcac966dbf..d6c379f6350 100644 --- a/src/mongo/db/query/fle/encrypted_predicate.h +++ b/src/mongo/db/query/fle/encrypted_predicate.h @@ -72,9 +72,6 @@ T parseFindPayload(BSONValue payload) { payload); } -std::unique_ptr makeTagDisjunction(ExpressionContext* expCtx, - std::vector&& tags); - /** * Convert a vector of PrfBlocks to a BSONArray for use in MatchExpression tag generation. 
*/ diff --git a/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h b/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h index 5c06e89a0da..88a6c85983a 100644 --- a/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h +++ b/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h @@ -102,6 +102,5 @@ public: protected: MockServerRewrite _mock{}; - ExpressionContextForTest _expCtx; }; } // namespace mongo::fle diff --git a/src/mongo/db/query/fle/equality_predicate.cpp b/src/mongo/db/query/fle/equality_predicate.cpp index 88396949ee8..a349962145b 100644 --- a/src/mongo/db/query/fle/equality_predicate.cpp +++ b/src/mongo/db/query/fle/equality_predicate.cpp @@ -286,8 +286,20 @@ std::unique_ptr EqualityPredicate::rewriteToTagDisjunction(Expressio std::vector> orListElems; auto payload = constChild->getValue(); auto tags = toValues(generateTags(std::ref(payload))); - auto disjunction = makeTagDisjunction(_rewriter->getExpressionContext(), std::move(tags)); - + for (auto&& tagElt : tags) { + // ... and for each tag, construct expression {$in: [tag, + // "$__safeContent__"]}. 
+ std::vector> inVec{ + ExpressionConstant::create(_rewriter->getExpressionContext(), tagElt), + ExpressionFieldPath::createPathFromString( + _rewriter->getExpressionContext(), + kSafeContent, + _rewriter->getExpressionContext()->variablesParseState)}; + orListElems.push_back( + make_intrusive(_rewriter->getExpressionContext(), std::move(inVec))); + } + auto disjunction = std::make_unique(_rewriter->getExpressionContext(), + std::move(orListElems)); if (eqExpr->getOp() == ExpressionCompare::NE) { std::vector> notChild{disjunction.release()}; return std::make_unique(_rewriter->getExpressionContext(), @@ -300,19 +312,27 @@ std::unique_ptr EqualityPredicate::rewriteToTagDisjunction(Expressio return nullptr; } auto& equalitiesList = inList->getChildren(); - std::vector allTags; + std::vector> orListElems; + auto expCtx = _rewriter->getExpressionContext(); for (auto& equality : equalitiesList) { // For each expression representing a FleFindPayload... if (auto constChild = dynamic_cast(equality.get())) { // ... rewrite the payload to a list of tags... auto payload = constChild->getValue(); auto tags = toValues(generateTags(std::ref(payload))); - allTags.insert(allTags.end(), - std::make_move_iterator(tags.begin()), - std::make_move_iterator(tags.end())); + for (auto&& tagElt : tags) { + // ... and for each tag, construct expression {$in: [tag, + // "$__safeContent__"]}. 
+ std::vector> inVec{ + ExpressionConstant::create(expCtx, tagElt), + ExpressionFieldPath::createPathFromString( + expCtx, kSafeContent, expCtx->variablesParseState)}; + orListElems.push_back( + make_intrusive(expCtx, std::move(inVec))); + } } } - return makeTagDisjunction(_rewriter->getExpressionContext(), std::move(allTags)); + return std::make_unique(expCtx, std::move(orListElems)); } return nullptr; } diff --git a/src/mongo/db/query/fle/range_predicate.cpp b/src/mongo/db/query/fle/range_predicate.cpp index 184f4f4baa2..0ebf33790fc 100644 --- a/src/mongo/db/query/fle/range_predicate.cpp +++ b/src/mongo/db/query/fle/range_predicate.cpp @@ -35,7 +35,6 @@ #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_tags.h" #include "mongo/db/matcher/expression_leaf.h" -#include "mongo/db/pipeline/expression.h" #include "mongo/db/query/fle/encrypted_predicate.h" namespace mongo::fle { @@ -43,9 +42,6 @@ namespace mongo::fle { REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE_WITH_FLAG(BETWEEN, RangePredicate, gFeatureFlagFLE2Range); -REGISTER_ENCRYPTED_AGG_PREDICATE_REWRITE_WITH_FLAG(ExpressionBetween, - RangePredicate, - gFeatureFlagFLE2Range); std::vector RangePredicate::generateTags(BSONValue payload) const { auto parsedPayload = parseFindPayload(payload); @@ -66,9 +62,7 @@ std::vector RangePredicate::generateTags(BSONValue payload) const { std::unique_ptr RangePredicate::rewriteToTagDisjunction( MatchExpression* expr) const { - tassert(6720900, - "Range rewrite should only be called with $between operator.", - expr->matchType() == MatchExpression::BETWEEN); + invariant(expr->matchType() == MatchExpression::BETWEEN); auto betExpr = static_cast(expr); auto payload = betExpr->rhs(); @@ -78,24 +72,9 @@ std::unique_ptr RangePredicate::rewriteToTagDisjunction( return makeTagDisjunction(toBSONArray(generateTags(payload))); } +// TODO: SERVER-67209 Server-side rewrite for agg expressions with $between. 
std::unique_ptr RangePredicate::rewriteToTagDisjunction(Expression* expr) const { - auto betweenExpr = dynamic_cast(expr); - tassert(6720901, "Range rewrite should only be called with $between operator.", betweenExpr); - auto children = betweenExpr->getChildren(); - uassert(6720902, "$between should have two children.", children.size() == 2); - - auto fieldpath = dynamic_cast(children[0].get()); - uassert(6720903, "first argument should be a fieldpath", fieldpath); - auto secondArg = dynamic_cast(children[1].get()); - uassert(6720904, "second argument should be a constant", secondArg); - auto payload = secondArg->getValue(); - - if (!isPayload(payload)) { - return nullptr; - } - auto tags = toValues(generateTags(std::ref(payload))); - - return makeTagDisjunction(_rewriter->getExpressionContext(), std::move(tags)); + return nullptr; } // TODO: SERVER-67267 Rewrite $between to $_internalFleBetween when number of tags exceeds diff --git a/src/mongo/db/query/fle/range_predicate_test.cpp b/src/mongo/db/query/fle/range_predicate_test.cpp index 17f33b55e52..bc8bca184f3 100644 --- a/src/mongo/db/query/fle/range_predicate_test.cpp +++ b/src/mongo/db/query/fle/range_predicate_test.cpp @@ -29,9 +29,6 @@ #include "mongo/crypto/fle_crypto.h" #include "mongo/db/matcher/expression_leaf.h" -#include "mongo/db/pipeline/expression.h" -#include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/fle/encrypted_predicate.h" #include "mongo/db/query/fle/encrypted_predicate_test_fixtures.h" #include "mongo/db/query/fle/range_predicate.h" #include "mongo/idl/server_parameter_test_util.h" @@ -54,52 +51,39 @@ public: } - bool payloadValid = true; - protected: bool isPayload(const BSONElement& elt) const override { - return payloadValid; + return true; } bool isPayload(const Value& v) const override { - return payloadValid; + return true; } std::vector generateTags(BSONValue payload) const { return stdx::visit( - OverloadedVisitor{[&](BSONElement p) { - auto 
parsedPayload = p.Obj().firstElement(); - auto fieldName = parsedPayload.fieldNameStringData(); - - std::vector range; - auto payloadAsArray = parsedPayload.Array(); - for (auto&& elt : payloadAsArray) { - range.push_back(elt); - } - - std::vector allTags; - for (auto i = range[0].Number(); i <= range[1].Number(); i++) { - ASSERT(_tags.find({fieldName, i}) != _tags.end()); - auto temp = _tags.find({fieldName, i})->second; - for (auto tag : temp) { - allTags.push_back(tag); - } - } - return allTags; - }, - [&](std::reference_wrapper v) { - if (v.get().isArray()) { - auto arr = v.get().getArray(); - std::vector allTags; - for (auto& val : arr) { - allTags.push_back(PrfBlock( - {static_cast(val.coerceToInt())})); - } - return allTags; - } else { - return std::vector{}; - } - }}, + OverloadedVisitor{ + [&](BSONElement p) { + auto parsedPayload = p.Obj().firstElement(); + auto fieldName = parsedPayload.fieldNameStringData(); + + std::vector range; + auto payloadAsArray = parsedPayload.Array(); + for (auto&& elt : payloadAsArray) { + range.push_back(elt); + } + + std::vector allTags; + for (auto i = range[0].Number(); i <= range[1].Number(); i++) { + ASSERT(_tags.find({fieldName, i}) != _tags.end()); + auto temp = _tags.find({fieldName, i})->second; + for (auto tag : temp) { + allTags.push_back(tag); + } + } + return allTags; + }, + [&](std::reference_wrapper v) { return std::vector{}; }}, payload); } @@ -115,7 +99,7 @@ protected: MockRangePredicate _predicate; }; -TEST_F(RangePredicateRewriteTest, MatchRangeRewrite) { +TEST_F(RangePredicateRewriteTest, BasicRangeRewrite) { RAIIServerParameterControllerForTest controller("featureFlagFLE2Range", true); int start = 1; @@ -142,31 +126,5 @@ TEST_F(RangePredicateRewriteTest, MatchRangeRewrite) { assertRewriteToTags(_predicate, &inputExpr, toBSONArray(std::move(allTags))); } - -TEST_F(RangePredicateRewriteTest, AggRangeRewrite) { - auto input = fromjson(R"({$between: ["$age", {$literal: [1, 2, 3]}]})"); - auto inputExpr = - 
ExpressionBetween::parseExpression(&_expCtx, input, _expCtx.variablesParseState); - - auto expected = makeTagDisjunction(&_expCtx, toValues({{1}, {2}, {3}})); - - auto actual = _predicate.rewrite(inputExpr.get()); - - ASSERT_BSONOBJ_EQ(actual->serialize(false).getDocument().toBson(), - expected->serialize(false).getDocument().toBson()); -} - -TEST_F(RangePredicateRewriteTest, AggRangeRewriteNoOp) { - auto input = fromjson(R"({$between: ["$age", {$literal: [1, 2, 3]}]})"); - auto inputExpr = - ExpressionBetween::parseExpression(&_expCtx, input, _expCtx.variablesParseState); - - auto expected = inputExpr; - - _predicate.payloadValid = false; - auto actual = _predicate.rewrite(inputExpr.get()); - ASSERT(actual == nullptr); -} - }; // namespace } // namespace mongo::fle diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 3e8653eb229..e2f0fc91726 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -1437,7 +1437,10 @@ StatusWith> getExecutor( sbe::isQuerySbeCompatible(&mainColl, canonicalQuery.get(), plannerParams.options)); if (isEligibleForBonsai(*canonicalQuery, opCtx, mainColl)) { - return getSBEExecutorViaCascadesOptimizer(mainColl, std::move(canonicalQuery)); + return getSBEExecutorViaCascadesOptimizer(mainColl, + std::move(canonicalQuery), + plannerParams.options & + QueryPlannerParams::PRESERVE_RECORD_ID); } // Use SBE if 'canonicalQuery' is SBE compatible. @@ -1713,9 +1716,8 @@ StatusWith> getExecutorDele // The underlying query plan must preserve the record id, since it will be needed in order to // identify the record to update. 
- cq->setForceGenerateRecordId(true); + const size_t defaultPlannerOptions = QueryPlannerParams::PRESERVE_RECORD_ID; - const size_t defaultPlannerOptions = QueryPlannerParams::DEFAULT; ClassicPrepareExecutionHelper helper{ opCtx, collection, ws.get(), cq.get(), nullptr, defaultPlannerOptions}; auto executionResult = helper.prepare(); @@ -1905,9 +1907,8 @@ StatusWith> getExecutorUpda // The underlying query plan must preserve the record id, since it will be needed in order to // identify the record to update. - cq->setForceGenerateRecordId(true); + const size_t defaultPlannerOptions = QueryPlannerParams::PRESERVE_RECORD_ID; - const size_t defaultPlannerOptions = QueryPlannerParams::DEFAULT; ClassicPrepareExecutionHelper helper{ opCtx, collection, ws.get(), cq.get(), nullptr, defaultPlannerOptions}; auto executionResult = helper.prepare(); diff --git a/src/mongo/db/query/optimizer/README.md b/src/mongo/db/query/optimizer/README.md index ebffcc148e3..2568cb42b2d 100644 --- a/src/mongo/db/query/optimizer/README.md +++ b/src/mongo/db/query/optimizer/README.md @@ -13,7 +13,7 @@ The following C++ unit tests exercise relevant parts of the codebase: - algebra_test (src/mongo/db/query/optimizer/algebra/) - db_pipeline_test (src/mongo/db/pipeline/) - - This test suite includes many unrelated test cases, but - 'abt/abt_translation_test.cpp' and 'abt/abt_optimization_test.cpp' are the relevant ones. + 'abt/pipeline_test.cpp' is the relevant one. 
- optimizer_test (src/mongo/db/query/optimizer/) - sbe_abt_test (src/mongo/db/exec/sbe/abt/) @@ -34,7 +34,6 @@ exercising this codebase: [buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml](/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml) - **cqf_parallel**: [buildscripts/resmokeconfig/suites/cqf_parallel.yml](/buildscripts/resmokeconfig/suites/cqf_parallel.yml) - **cqf_passthrough**: [buildscripts/resmokeconfig/suites/cqf_passthrough.yml](/buildscripts/resmokeconfig/suites/cqf_passthrough.yml) -- **query_golden_cqf**: [buildscripts/resmokeconfig/suites/query_golden_cqf.yml](/buildscripts/resmokeconfig/suites/query_golden_cqf.yml) Desriptions of these suites can be found in [buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml](/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml). @@ -43,13 +42,13 @@ You may run these like so, adjusting the `-j` flag for the appropriate level of parallel execution for your machine. ``` ./buildscripts/resmoke.py run -j4 \ - --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel,cqf_passthrough,query_golden_cqf + --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel,cqf_passthrough ``` cqf_passthrough takes the longest to run by far, so this command may be more useful for a quicker signal: ``` -./buildscripts/resmoke.py run --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel,query_golden_cqf -j4 +./buildscripts/resmoke.py run --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel -j4 ``` ## Local Testing Recommendation @@ -59,10 +58,10 @@ ninja install-devcore build/install/bin/algebra_test \ build/install/bin/db_pipeline_test build/install/bin/optimizer_test \ build/install/bin/sbe_abt_test \ && ./build/install/bin/algebra_test \ -&& ./build/install/bin/db_pipeline_test --fileNameFilter=abt/.* \ +&& ./build/install/bin/db_pipeline_test --fileNameFilter=pipeline_test.cpp \ && ./build/install/bin/optimizer_test \ && ./build/install/bin/sbe_abt_test \ -&& ./buildscripts/resmoke.py run 
--suites=cqf,cqf_parallel,cqf_disabled_pipeline_opt,query_golden_cqf -j4 +&& ./buildscripts/resmoke.py run --suites=cqf,cqf_parallel,cqf_disabled_pipeline_opt -j4 ``` **Note:** You may need to adjust the path to the unit test binary targets if your SCons install directory is something more like `build/opt/install/bin`. diff --git a/src/mongo/db/query/optimizer/SConscript b/src/mongo/db/query/optimizer/SConscript index 3791d2bdd0e..769cb2c3bdb 100644 --- a/src/mongo/db/query/optimizer/SConscript +++ b/src/mongo/db/query/optimizer/SConscript @@ -42,6 +42,7 @@ env.Library( 'utils/ce_math.cpp', "utils/interval_utils.cpp", "utils/memo_utils.cpp", + "utils/rewriter_utils.cpp", "utils/utils.cpp", ], LIBDEPS=[ @@ -71,16 +72,6 @@ env.CppUnitTest( "reference_tracker_test.cpp", "rewrites/path_optimizer_test.cpp", "interval_intersection_test.cpp", - ], - LIBDEPS=[ - "optimizer", - "unit_test_utils", - ], -) - -env.CppUnitTest( - target='optimizer_failure_test', - source=[ "optimizer_failure_test.cpp", ], LIBDEPS=[ diff --git a/src/mongo/db/query/optimizer/cascades/cost_derivation.cpp b/src/mongo/db/query/optimizer/cascades/cost_derivation.cpp index 35be4caa4a3..d212c32c5a7 100644 --- a/src/mongo/db/query/optimizer/cascades/cost_derivation.cpp +++ b/src/mongo/db/query/optimizer/cascades/cost_derivation.cpp @@ -124,7 +124,7 @@ public: const LogicalProps& childLogicalProps = _memo.getGroup(node.getGroupId())._logicalProperties; // Notice that unlike all physical nodes, this logical node takes it cardinality directly - // from the memo group logical property, ignoring _cardinalityEstimate. + // from the memo group logical property, ignoring _cardinalityEstimate. 
CEType baseCE = getPropertyConst(childLogicalProps).getEstimate(); if (hasProperty(_physProps)) { diff --git a/src/mongo/db/query/optimizer/cascades/implementers.cpp b/src/mongo/db/query/optimizer/cascades/implementers.cpp index 8586fef8c61..997fe8d0ee2 100644 --- a/src/mongo/db/query/optimizer/cascades/implementers.cpp +++ b/src/mongo/db/query/optimizer/cascades/implementers.cpp @@ -1606,7 +1606,7 @@ private: setCollationForRIDIntersect( collationLeftRightSplit, leftPhysPropsLocal, rightPhysPropsLocal); - optimizeChildren( + optimizeChildren( _queue, kDefaultPriority, std::move(physicalJoin), diff --git a/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp b/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp index afed7401960..dcd744b14ef 100644 --- a/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp +++ b/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp @@ -690,13 +690,11 @@ static void convertFilterToSargableNode(ABT::reference_type node, } } + // If the filter has no constraints after removing no-ops, then rewrite the filter with a + // predicate using the constant True. if (conversion->_reqMap.empty()) { - // If the filter has no constraints after removing no-ops, then replace with its child. We - // need to copy the child since we hold it by reference from the memo, and during - // subtitution the current group will be erased. 
- - ABT newNode = filterNode.getChild(); - ctx.addNode(newNode, true /*substitute*/); + ctx.addNode(make(Constant::boolean(true), filterNode.getChild()), + true /*substitute*/); return; } diff --git a/src/mongo/db/query/optimizer/cascades/memo.cpp b/src/mongo/db/query/optimizer/cascades/memo.cpp index ec31837a919..c702175bd4e 100644 --- a/src/mongo/db/query/optimizer/cascades/memo.cpp +++ b/src/mongo/db/query/optimizer/cascades/memo.cpp @@ -87,6 +87,17 @@ const ABTVector& OrderPreservingABTSet::getVector() const { return _vector; } +PhysRewriteEntry::PhysRewriteEntry(const double priority, + PhysicalRewriteType rule, + ABT node, + std::vector> childProps, + NodeCEMap nodeCEMap) + : _priority(priority), + _rule(rule), + _node(std::move(node)), + _childProps(std::move(childProps)), + _nodeCEMap(std::move(nodeCEMap)) {} + PhysOptimizationResult::PhysOptimizationResult() : PhysOptimizationResult(0, {}, CostType::kInfinity) {} @@ -226,14 +237,10 @@ public: // noop } - GroupIdType transport(const ABT& n, + GroupIdType transport(const ABT& /*n*/, const MemoLogicalDelegatorNode& node, - const VariableEnvironment& env) { - if (_targetGroupMap.count(n.ref()) == 0) { - return node.getGroupId(); - } - - return addNodes(n, node, n, env, {}); + const VariableEnvironment& /*env*/) { + return node.getGroupId(); } void prepare(const ABT& n, const FilterNode& node, const VariableEnvironment& /*env*/) { @@ -621,6 +628,9 @@ std::pair Memo::addNode(GroupIdType groupId, ABT n, LogicalRewriteType rule) { uassert(6624052, "Attempting to insert a physical node", !n.is()); + uassert(6624053, + "Attempting to insert a logical delegator node", + !n.is()); Group& group = *_groups.at(groupId); OrderPreservingABTSet& nodes = group._logicalNodes; diff --git a/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp b/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp index 4388f015f1a..82eedbd4ec7 100644 --- a/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp +++ 
b/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp @@ -33,6 +33,7 @@ #include "mongo/db/query/optimizer/cascades/implementers.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/utils/rewriter_utils.h" namespace mongo::optimizer::cascades { @@ -138,21 +139,21 @@ static void printCandidateInfo(const ABT& node, } } -void PhysicalRewriter::costAndRetainBestNode(std::unique_ptr node, +void PhysicalRewriter::costAndRetainBestNode(ABT node, ChildPropsType childProps, NodeCEMap nodeCEMap, const PhysicalRewriteType rule, const GroupIdType groupId, PrefixId& prefixId, PhysOptimizationResult& bestResult) { - const CostAndCE nodeCostAndCE = _costDerivation.deriveCost( - _memo, bestResult._physProps, node->ref(), childProps, nodeCEMap); + const CostAndCE nodeCostAndCE = + _costDerivation.deriveCost(_memo, bestResult._physProps, node.ref(), childProps, nodeCEMap); const CostType nodeCost = nodeCostAndCE._cost; uassert(6624056, "Must get non-infinity cost for physical node.", !nodeCost.isInfinite()); if (_memo.getDebugInfo().hasDebugLevel(3)) { std::cout << "Requesting optimization\n"; - printCandidateInfo(*node, groupId, nodeCost, childProps, bestResult); + printCandidateInfo(node, groupId, nodeCost, childProps, bestResult); } const CostType childCostLimit = @@ -165,13 +166,14 @@ void PhysicalRewriter::costAndRetainBestNode(std::unique_ptr node, std::cout << (success ? (improvement ? 
"Improved" : "Did not improve") : "Failed optimizing") << "\n"; - printCandidateInfo(*node, groupId, nodeCost, childProps, bestResult); + printCandidateInfo(node, groupId, nodeCost, childProps, bestResult); } tassert(6678300, "Retaining node with uninitialized rewrite rule", rule != cascades::PhysicalRewriteType::Uninitialized); - PhysNodeInfo candidateNodeInfo{std::move(*node), cost, nodeCost, nodeCostAndCE._ce, rule}; + PhysNodeInfo candidateNodeInfo{ + unwrapConstFilter(std::move(node)), cost, nodeCost, nodeCostAndCE._ce, rule}; const bool keepRejectedPlans = _hints._keepRejectedPlans; if (improvement) { if (keepRejectedPlans && bestResult._nodeInfo) { @@ -377,7 +379,7 @@ PhysicalRewriter::OptimizeGroupResult PhysicalRewriter::optimizeGroup(const Grou NodeCEMap nodeCEMap = std::move(rewrite._nodeCEMap); if (nodeCEMap.empty()) { nodeCEMap.emplace( - rewrite._node->cast(), + rewrite._node.cast(), getPropertyConst(logicalProps).getEstimate()); } diff --git a/src/mongo/db/query/optimizer/cascades/physical_rewriter.h b/src/mongo/db/query/optimizer/cascades/physical_rewriter.h index cb1b1b395af..205ecc62c3a 100644 --- a/src/mongo/db/query/optimizer/cascades/physical_rewriter.h +++ b/src/mongo/db/query/optimizer/cascades/physical_rewriter.h @@ -71,7 +71,7 @@ public: CostType costLimit); private: - void costAndRetainBestNode(std::unique_ptr node, + void costAndRetainBestNode(ABT node, ChildPropsType childProps, NodeCEMap nodeCEMap, PhysicalRewriteType rule, diff --git a/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp b/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp index 6475b704d0f..c27ed44dd84 100644 --- a/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp +++ b/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp @@ -29,6 +29,7 @@ #include "mongo/db/query/optimizer/cascades/rewrite_queues.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/utils/memo_utils.h" #include namespace 
mongo::optimizer::cascades { @@ -57,39 +58,14 @@ bool LogicalRewriteEntryComparator::operator()( return x->_nodeId._index < y->_nodeId._index; } -PhysRewriteEntry::PhysRewriteEntry(const double priority, - PhysicalRewriteType rule, - std::unique_ptr node, - std::vector> childProps, - NodeCEMap nodeCEMap) - : _priority(priority), - _rule(rule), - _node(std::move(node)), - _childProps(std::move(childProps)), - _nodeCEMap(std::move(nodeCEMap)) {} - void optimizeChildrenNoAssert(PhysRewriteQueue& queue, const double priority, const PhysicalRewriteType rule, - std::unique_ptr node, + ABT node, ChildPropsType childProps, NodeCEMap nodeCEMap) { queue.emplace(std::make_unique( priority, rule, std::move(node), std::move(childProps), std::move(nodeCEMap))); } -void optimizeChildrenNoAssert(PhysRewriteQueue& queue, - double priority, - PhysicalRewriteType rule, - ABT node, - ChildPropsType childProps, - NodeCEMap nodeCEMap) { - optimizeChildrenNoAssert(queue, - priority, - rule, - std::make_unique(std::move(node)), - std::move(childProps), - std::move(nodeCEMap)); -} - } // namespace mongo::optimizer::cascades diff --git a/src/mongo/db/query/optimizer/cascades/rewrite_queues.h b/src/mongo/db/query/optimizer/cascades/rewrite_queues.h index 732d338a153..b8e6ec66530 100644 --- a/src/mongo/db/query/optimizer/cascades/rewrite_queues.h +++ b/src/mongo/db/query/optimizer/cascades/rewrite_queues.h @@ -33,6 +33,7 @@ #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/utils/rewriter_utils.h" namespace mongo::optimizer::cascades { @@ -74,7 +75,7 @@ static constexpr double kDefaultPriority = 10.0; struct PhysRewriteEntry { PhysRewriteEntry(double priority, PhysicalRewriteType rule, - std::unique_ptr node, + ABT node, ChildPropsType childProps, NodeCEMap nodeCEMap); @@ -87,15 +88,9 @@ struct PhysRewriteEntry { // Rewrite rule that triggered this entry. 
PhysicalRewriteType _rule; - // Node we are optimizing. This is typically a single node such as Filter with a - // MemoLogicalDelegator child, but could be a more complex tree. - std::unique_ptr _node; - // For each child to optimize, we have associated physical properties. If we are optimizing the - // node under new properties (e.g. via enforcement) the map will contain a single entry using - // the address of the node itself (as opposed to the children to optimize). + ABT _node; ChildPropsType _childProps; - // Optional per-node CE. Used if the node is complex tree. NodeCEMap _nodeCEMap; }; @@ -108,13 +103,6 @@ using PhysRewriteQueue = std::priority_queue, std::vector>, PhysRewriteEntryComparator>; -void optimizeChildrenNoAssert(PhysRewriteQueue& queue, - double priority, - PhysicalRewriteType rule, - std::unique_ptr node, - ChildPropsType childProps, - NodeCEMap nodeCEMap); - void optimizeChildrenNoAssert(PhysRewriteQueue& queue, double priority, PhysicalRewriteType rule, @@ -128,8 +116,7 @@ static void optimizeChildren(PhysRewriteQueue& queue, ABT node, ChildPropsType childProps) { static_assert(canBePhysicalNode(), "Can only optimize a physical node."); - optimizeChildrenNoAssert( - queue, priority, rule, std::move(node), std::move(childProps), {} /*nodeCEMap*/); + optimizeChildrenNoAssert(queue, priority, rule, std::move(node), std::move(childProps), {}); } template @@ -144,7 +131,7 @@ static void optimizeChild(PhysRewriteQueue& queue, template static void optimizeChild(PhysRewriteQueue& queue, const double priority, ABT node) { - optimizeChildren(queue, priority, std::move(node), {} /*nodeCEMap*/); + optimizeChildren(queue, priority, std::move(node), {}); } @@ -153,10 +140,8 @@ void optimizeUnderNewProperties(cascades::PhysRewriteQueue& queue, const double priority, ABT child, properties::PhysProps props) { - auto nodePtr = std::make_unique(std::move(child)); - ChildPropsType childProps{{nodePtr.get(), std::move(props)}}; - optimizeChildrenNoAssert( - 
queue, priority, rule, std::move(nodePtr), std::move(childProps), {} /*nodeCEMap*/); + optimizeChild( + queue, priority, wrapConstFilter(std::move(child)), std::move(props)); } template diff --git a/src/mongo/db/query/optimizer/cascades/rewriter_rules.h b/src/mongo/db/query/optimizer/cascades/rewriter_rules.h index 7f6b0cdb07b..311c38c2081 100644 --- a/src/mongo/db/query/optimizer/cascades/rewriter_rules.h +++ b/src/mongo/db/query/optimizer/cascades/rewriter_rules.h @@ -115,7 +115,7 @@ MAKE_PRINTABLE_ENUM_STRING_ARRAY(LogicalRewriterTypeEnum, F(RIDIntersectMergeJoin) \ F(RIDIntersectHashJoin) \ F(RIDIntersectGroupBy) \ - F(IndexFetch) + F(RIDIntersectNLJ) MAKE_PRINTABLE_ENUM(PhysicalRewriteType, PHYSICALREWRITER_NAMES); MAKE_PRINTABLE_ENUM_STRING_ARRAY(PhysicalRewriterTypeEnum, diff --git a/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp b/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp index 6f4f90a6a50..bd6049947ea 100644 --- a/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp @@ -1546,6 +1546,8 @@ TEST(LogicalRewriter, RemoveNoopFilter) { "| | ptest\n" "| RefBlock: \n" "| Variable [ptest]\n" + "Filter []\n" + "| Const [true]\n" "Scan [test]\n" " BindBlock:\n" " [ptest]\n" diff --git a/src/mongo/db/query/optimizer/optimizer_failure_test.cpp b/src/mongo/db/query/optimizer/optimizer_failure_test.cpp index 3b57295d606..14c1a4a4cc6 100644 --- a/src/mongo/db/query/optimizer/optimizer_failure_test.cpp +++ b/src/mongo/db/query/optimizer/optimizer_failure_test.cpp @@ -29,9 +29,13 @@ #include "mongo/db/query/optimizer/cascades/ce_hinted.h" #include "mongo/db/query/optimizer/cascades/cost_derivation.h" +#include "mongo/db/query/optimizer/explain.h" #include "mongo/db/query/optimizer/node.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/reference_tracker.h" +#include 
"mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/syntax/syntax_fwd_declare.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" #include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/unittest/death_test.h" @@ -257,7 +261,7 @@ DEATH_TEST_REGEX(Optimizer, OptGroupFailed, "Tripwire assertion.*6808706") { OptPhaseManager phaseManager( {OptPhase::MemoExplorationPhase, OptPhase::MemoImplementationPhase}, prefixId, - true /*requireRID*/, + true, {{{"test", {{}, {}}}}}, std::make_unique(std::move(hints)), std::make_unique(), diff --git a/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp b/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp index bb4d17bac3a..b5f52759993 100644 --- a/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp @@ -1075,9 +1075,9 @@ TEST(PhysRewriter, FilterIndexing2NonSarg) { LogicalRewriteType::FilterRIDIntersectReorder}; PhysicalRewriteType physicalRules[] = {PhysicalRewriteType::Seek, PhysicalRewriteType::Seek, - PhysicalRewriteType::IndexFetch, + PhysicalRewriteType::RIDIntersectNLJ, PhysicalRewriteType::Evaluation, - PhysicalRewriteType::IndexFetch, + PhysicalRewriteType::RIDIntersectNLJ, PhysicalRewriteType::Root, PhysicalRewriteType::SargableToIndex, PhysicalRewriteType::SargableToIndex, @@ -3876,7 +3876,6 @@ TEST(PhysRewriter, ArrayConstantIndex) { // Demonstrate we get index bounds to handle the array constant, while we also retain the // original filter. We have index bound with the array itself unioned with bound using the first // array element. - // TODO SERVER-70120: Reduce GroupBy Unique to just GroupBy. 
ASSERT_EXPLAIN_V2( "Root []\n" "| | projections: \n" @@ -5292,143 +5291,6 @@ TEST(PhysRewriter, RootInterval) { optimized); } -TEST(PhysRewriter, EqMemberSargable) { - using namespace properties; - - ABT scanNode = make("root", "c1"); - - const auto [tag, val] = sbe::value::makeNewArray(); - sbe::value::Array* arr = sbe::value::getArrayView(val); - for (int i = 1; i < 4; i++) { - arr->push_back(sbe::value::TypeTags::NumberInt32, i); - } - ABT arrayConst = make(tag, val); - - ABT filterNode = make( - make( - make("a", - make(make(Operations::EqMember, arrayConst), - PathTraverse::kSingleLevel)), - make("root")), - std::move(scanNode)); - ABT rootNode = - make(ProjectionRequirement{ProjectionNameVector{"root"}}, std::move(filterNode)); - - { - PrefixId prefixId; - OptPhaseManager phaseManager( - {OptPhase::MemoSubstitutionPhase}, - prefixId, - {{{"c1", - ScanDefinition{ - {}, - {{"index1", - makeIndexDefinition("a", CollationOp::Ascending, false /*isMultiKey*/)}}}}}}, - {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); - - ABT optimized = rootNode; - phaseManager.optimize(optimized); - - ASSERT_EXPLAIN_V2( - "Root []\n" - "| | projections: \n" - "| | root\n" - "| RefBlock: \n" - "| Variable [root]\n" - "Sargable [Complete]\n" - "| | | | | requirementsMap: \n" - "| | | | | refProjection: root, path: 'PathGet [a] PathIdentity []', " - "intervals: {{{[Const [1], Const [1]]}} U {{[Const [2], Const [2]]}} U {{[Const [3], " - "Const " - "[3]]}}}\n" - "| | | | candidateIndexes: \n" - "| | | | candidateId: 1, index1, {}, {0}, {{{[Const [1], Const [1]]}} U " - "{{[Const [2], Const [2]]}} U {{[Const [3], Const [3]]}}}\n" - "| | | scanParams: \n" - "| | | {'a': evalTemp_0}\n" - "| | | residualReqs: \n" - "| | | refProjection: evalTemp_0, path: 'PathIdentity []', " - "intervals: " - "{{{[Const [1], Const [1]]}} U {{[Const [2], Const [2]]}} U {{[Const [3], Const " - "[3]]}}}, " - "entryIndex: 0\n" - "| | BindBlock:\n" - "| RefBlock: \n" - "| Variable 
[root]\n" - "Scan [c1]\n" - " BindBlock:\n" - " [root]\n" - " Source []\n", - optimized); - } - - { - PrefixId prefixId; - OptPhaseManager phaseManager( - {OptPhase::MemoSubstitutionPhase, - OptPhase::MemoExplorationPhase, - OptPhase::MemoImplementationPhase}, - prefixId, - {{{"c1", - ScanDefinition{{}, - {{"index1", makeIndexDefinition("a", CollationOp::Ascending)}}}}}}, - {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); - - ABT optimized = rootNode; - phaseManager.optimize(optimized); - ASSERT_EQ(4, phaseManager.getMemo().getStats()._physPlanExplorationCount); - - // Test sargable filter is satisfied with an index scan. - // TODO SERVER-70120: Reduce GroupBy Unique to just GroupBy. - ASSERT_EXPLAIN_V2( - "Root []\n" - "| | projections: \n" - "| | root\n" - "| RefBlock: \n" - "| Variable [root]\n" - "BinaryJoin [joinType: Inner, {rid_0}]\n" - "| | Const [true]\n" - "| LimitSkip []\n" - "| | limitSkip:\n" - "| | limit: 1\n" - "| | skip: 0\n" - "| Seek [ridProjection: rid_0, {'': root}, c1]\n" - "| | BindBlock:\n" - "| | [root]\n" - "| | Source []\n" - "| RefBlock: \n" - "| Variable [rid_0]\n" - "Unique []\n" - "| projections: \n" - "| rid_0\n" - "GroupBy []\n" - "| | groupings: \n" - "| | RefBlock: \n" - "| | Variable [rid_0]\n" - "| aggregations: \n" - "Union []\n" - "| | | BindBlock:\n" - "| | | [rid_0]\n" - "| | | Source []\n" - "| | IndexScan [{'': rid_0}, scanDefName: c1, indexDefName: index1, interval: " - "{[Const [3], Const [3]]}]\n" - "| | BindBlock:\n" - "| | [rid_0]\n" - "| | Source []\n" - "| IndexScan [{'': rid_0}, scanDefName: c1, indexDefName: index1, interval: " - "{[Const [2], Const [2]]}]\n" - "| BindBlock:\n" - "| [rid_0]\n" - "| Source []\n" - "IndexScan [{'': rid_0}, scanDefName: c1, indexDefName: index1, interval: {[Const " - "[1], Const [1]]}]\n" - " BindBlock:\n" - " [rid_0]\n" - " Source []\n", - optimized); - } -} - TEST(PhysRewriter, IndexSubfieldCovered) { using namespace properties; diff --git 
a/src/mongo/db/query/optimizer/rewrites/const_eval.cpp b/src/mongo/db/query/optimizer/rewrites/const_eval.cpp index ff87d3b5bbd..5ced7f0d93f 100644 --- a/src/mongo/db/query/optimizer/rewrites/const_eval.cpp +++ b/src/mongo/db/query/optimizer/rewrites/const_eval.cpp @@ -483,18 +483,6 @@ void ConstEval::transport(ABT&, const LambdaAbstraction&, ABT&) { --_inCostlyCtx; } -void ConstEval::transport(ABT& n, const FilterNode& op, ABT& child, ABT& expr) { - if (expr == Constant::boolean(true)) { - // Remove trivially true filter. - - // First, pull out the child and put in a blackhole. - auto result = std::exchange(child, make()); - - // Replace the filter node itself with the extracted child. - swapAndUpdate(n, std::move(result)); - } -} - void ConstEval::transport(ABT& n, const EvaluationNode& op, ABT& child, ABT& expr) { if (_noRefProj.erase(&op)) { // The evaluation node is unused so replace it with its own child. diff --git a/src/mongo/db/query/optimizer/rewrites/const_eval.h b/src/mongo/db/query/optimizer/rewrites/const_eval.h index 3c31bc93f86..171b7665fa1 100644 --- a/src/mongo/db/query/optimizer/rewrites/const_eval.h +++ b/src/mongo/db/query/optimizer/rewrites/const_eval.h @@ -64,8 +64,6 @@ public: void transport(ABT& n, const BinaryOp& op, ABT& lhs, ABT& rhs); void transport(ABT& n, const FunctionCall& op, std::vector& args); void transport(ABT& n, const If& op, ABT& cond, ABT& thenBranch, ABT& elseBranch); - - void transport(ABT& n, const FilterNode& op, ABT& child, ABT& expr); void transport(ABT& n, const EvaluationNode& op, ABT& child, ABT& expr); void prepare(ABT&, const PathTraverse&); diff --git a/src/mongo/db/query/optimizer/syntax/expr.cpp b/src/mongo/db/query/optimizer/syntax/expr.cpp index 94b8b771ef2..dd7e0258816 100644 --- a/src/mongo/db/query/optimizer/syntax/expr.cpp +++ b/src/mongo/db/query/optimizer/syntax/expr.cpp @@ -28,7 +28,6 @@ */ #include "mongo/db/query/optimizer/syntax/expr.h" -#include "mongo/db/exec/sbe/values/value.h" #include 
"mongo/db/query/optimizer/node.h" #include "mongo/platform/decimal128.h" @@ -52,11 +51,6 @@ Constant::Constant(Constant&& other) noexcept { other._val = 0; } -ABT Constant::createFromCopy(const sbe::value::TypeTags tag, const sbe::value::Value val) { - auto copy = sbe::value::copyValue(tag, val); - return make(copy.first, copy.second); -} - ABT Constant::str(std::string str) { // Views are non-owning so we have to make a copy. auto [tag, val] = makeNewString(str); diff --git a/src/mongo/db/query/optimizer/syntax/expr.h b/src/mongo/db/query/optimizer/syntax/expr.h index 10046f183e0..289479ec5a1 100644 --- a/src/mongo/db/query/optimizer/syntax/expr.h +++ b/src/mongo/db/query/optimizer/syntax/expr.h @@ -49,8 +49,6 @@ class Constant final : public Operator<0>, public ExpressionSyntaxSort { public: Constant(sbe::value::TypeTags tag, sbe::value::Value val); - static ABT createFromCopy(sbe::value::TypeTags tag, sbe::value::Value val); - static ABT str(std::string str); static ABT int32(int32_t valueInt32); diff --git a/src/mongo/db/query/optimizer/utils/interval_utils.cpp b/src/mongo/db/query/optimizer/utils/interval_utils.cpp index e46029b7c6a..6d56b21e814 100644 --- a/src/mongo/db/query/optimizer/utils/interval_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/interval_utils.cpp @@ -330,54 +330,4 @@ bool combineCompoundIntervalsDNF(CompoundIntervalReqExpr::Node& targetIntervals, return true; } -boost::optional coerceIntervalToPathCompareEqMember(const IntervalReqExpr::Node& interval) { - // Create the array that EqMember will use to hold the members. - auto [eqMembersTag, eqMembersVal] = sbe::value::makeNewArray(); - sbe::value::ValueGuard guard{eqMembersTag, eqMembersVal}; - auto eqMembersArray = sbe::value::getArrayView(eqMembersVal); - - // An EqMember is a disjunction of conjunctions of atoms (point intervals). 
For example [1, 1] U - // [2, 2] U [3, 3] However each conjunction should only have one atom child, so we can think of - // it as a disjunction of point intervals instead. - if (const auto disj = interval.cast()) { - // We only make an EqMember if we have 2 or more comparisons. - if (disj->nodes().size() < 2) { - return boost::none; - } - - for (const auto& child : disj->nodes()) { - if (!child.is()) { - return boost::none; - } - - // Check that the conjunction has one atom child. - const auto conjChild = child.cast(); - if (conjChild->nodes().size() != 1 || - !conjChild->nodes().front().is()) { - return boost::none; - } - - // Check that the atom is a point interval, and the bound is a constant. - const auto atomChild = conjChild->nodes().front().cast(); - if (!atomChild->getExpr().isEquality() || - !atomChild->getExpr().getLowBound().getBound().is()) { - return boost::none; - } - - const auto constAtomChildPair = - atomChild->getExpr().getLowBound().getBound().cast()->get(); - - // Make a copy of the point bound, insert it into our EqMember members. - const auto newEqMember = copyValue(constAtomChildPair.first, constAtomChildPair.second); - eqMembersArray->push_back(newEqMember.first, newEqMember.second); - } - - // If we got to this point, we have successfully coerced the interval into an EqMember! - // Reset the guard so the members array doesn't get deleted. 
- guard.reset(); - return make(Operations::EqMember, make(eqMembersTag, eqMembersVal)); - } - return boost::none; -} - } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/utils/interval_utils.h b/src/mongo/db/query/optimizer/utils/interval_utils.h index 2c071cf84c6..397b9a264f8 100644 --- a/src/mongo/db/query/optimizer/utils/interval_utils.h +++ b/src/mongo/db/query/optimizer/utils/interval_utils.h @@ -68,9 +68,4 @@ bool combineCompoundIntervalsDNF(CompoundIntervalReqExpr::Node& targetIntervals, const IntervalReqExpr::Node& sourceIntervals, bool reverseSource = false); -/** - * Analyze the given interval, and convert it into a PathCompare EqMember if possible. - */ -boost::optional coerceIntervalToPathCompareEqMember(const IntervalReqExpr::Node& interval); - } // namespace mongo::optimizer diff --git a/src/mongo/db/serverless/serverless_server_status.cpp b/src/mongo/db/query/optimizer/utils/rewriter_utils.cpp similarity index 65% rename from src/mongo/db/serverless/serverless_server_status.cpp rename to src/mongo/db/query/optimizer/utils/rewriter_utils.cpp index 8d0d4658dc3..88c1427c8df 100644 --- a/src/mongo/db/serverless/serverless_server_status.cpp +++ b/src/mongo/db/query/optimizer/utils/rewriter_utils.cpp @@ -27,31 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include "mongo/db/query/optimizer/utils/rewriter_utils.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/commands/server_status.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" -namespace mongo { -namespace { +namespace mongo::optimizer { -class ServerlessServerStatus final : public ServerStatusSection { -public: - ServerlessServerStatus() : ServerStatusSection("serverless") {} +ABT wrapConstFilter(ABT node) { + return make(Constant::boolean(true), std::move(node)); +} - bool includeByDefault() const override { - return false; +ABT unwrapConstFilter(ABT node) { + if (auto nodePtr = node.cast(); + nodePtr != nullptr && nodePtr->getFilter() == Constant::boolean(true)) { + return nodePtr->getChild(); } + return node; +} - BSONObj generateSection(OperationContext* opCtx, - const BSONElement& configElement) const override { - BSONObjBuilder result; - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .appendInfoForServerStatus(&result); - return result.obj(); - } -} serverlessServerStatus; - -} // namespace -} // namespace mongo +} // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/utils/rewriter_utils.h b/src/mongo/db/query/optimizer/utils/rewriter_utils.h new file mode 100644 index 00000000000..e81190a5ce6 --- /dev/null +++ b/src/mongo/db/query/optimizer/utils/rewriter_utils.h @@ -0,0 +1,40 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. 
+ * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/db/query/optimizer/node.h" + + +namespace mongo::optimizer { + +ABT wrapConstFilter(ABT node); +ABT unwrapConstFilter(ABT node); + +} // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp b/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp index c857f6fd355..c060535acf6 100644 --- a/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp @@ -203,7 +203,7 @@ void serializeMetadata(std::ostream& stream, Metadata metadata) { // The ScanDefinitions are stored in an unordered map, and the order of the ScanDefinitions in // the golden file must be the same every time the test is run. 
std::map orderedScanDefs; - for (const auto& element : metadata._scanDefs) { + for (auto element : metadata._scanDefs) { orderedScanDefs.insert(element); } diff --git a/src/mongo/db/query/optimizer/utils/utils.cpp b/src/mongo/db/query/optimizer/utils/utils.cpp index 4fe66b6192e..8d8b6167681 100644 --- a/src/mongo/db/query/optimizer/utils/utils.cpp +++ b/src/mongo/db/query/optimizer/utils/utils.cpp @@ -29,13 +29,11 @@ #include "mongo/db/query/optimizer/utils/utils.h" -#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/syntax/path.h" -#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/db/query/optimizer/utils/interval_utils.h" #include "mongo/db/storage/storage_parameters_gen.h" @@ -526,7 +524,9 @@ public: return {}; } - ABT elementBound = Constant::createFromCopy(arr->getAt(0).first, arr->getAt(0).second); + const auto [elTag, elVal] = arr->getAt(0); + const auto [elTagCopy, elValCopy] = sbe::value::copyValue(elTag, elVal); + ABT elementBound = make(elTagCopy, elValCopy); // Create new interval which uses the first element of the array. const IntervalReqExpr::Node& newInterval = IntervalReqExpr::makeSingularDNF(IntervalRequirement{ @@ -655,48 +655,6 @@ public: return result; } - /** - * Convert to PathCompare EqMember to partial schema requirements if possible. - */ - ResultType makeEqMemberInterval(const ABT& bound) { - const auto boundConst = bound.cast(); - if (boundConst == nullptr) { - return {}; - } - - const auto [boundTag, boundVal] = boundConst->get(); - if (boundTag != sbe::value::TypeTags::Array) { - return {}; - } - const auto boundArray = sbe::value::getArrayView(boundVal); - - // Union the single intervals together. 
If we have PathCompare [EqMember] Const [[1, 2, 3]] - // we create [1, 1] U [2, 2] U [3, 3]. - boost::optional unionedInterval; - - for (size_t i = 0; i < boundArray->size(); i++) { - auto singleBoundLow = - Constant::createFromCopy(boundArray->getAt(i).first, boundArray->getAt(i).second); - auto singleBoundHigh = singleBoundLow; - - auto singleInterval = IntervalReqExpr::makeSingularDNF( - IntervalRequirement{{true /*inclusive*/, std::move(singleBoundLow)}, - {true /*inclusive*/, std::move(singleBoundHigh)}}); - - if (unionedInterval) { - // Union the singleInterval with the unionedInterval we want to update. - combineIntervalsDNF(false /*intersect*/, *unionedInterval, singleInterval); - } else { - unionedInterval = std::move(singleInterval); - } - } - - return {{PartialSchemaRequirements{ - {PartialSchemaKey{"" /*projectionName*/, make()}, - PartialSchemaRequirement{ - "" /*boundProjectionName*/, std::move(*unionedInterval), false /*isPerfOnly*/}}}}}; - } - ResultType transport(const ABT& n, const PathCompare& pathCompare, ResultType inputResult) { if (!inputResult) { return {}; @@ -713,9 +671,6 @@ public: const Operations op = pathCompare.op(); switch (op) { - case Operations::EqMember: - return makeEqMemberInterval(bound); - case Operations::Eq: lowBound = bound; highBound = bound; @@ -1487,13 +1442,12 @@ void lowerPartialSchemaRequirement(const PartialSchemaKey& key, ABT path = make(); if (pathToInterval) { // If we have a path converter, attempt to convert bounds back into a path element. 
- if (auto conversion = pathToInterval(make()); *conversion == req.getIntervals()) { + if (auto conversion = pathToInterval(make()); + conversion && *conversion == req.getIntervals()) { path = make(); } else if (auto conversion = pathToInterval(make()); - *conversion == req.getIntervals()) { + conversion && *conversion == req.getIntervals()) { path = make(); - } else if (auto conversion = coerceIntervalToPathCompareEqMember(req.getIntervals())) { - path = std::move(*conversion); } } if (path.is()) { diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp index 7cbc7116a92..4b19c1fac59 100644 --- a/src/mongo/db/query/planner_analysis.cpp +++ b/src/mongo/db/query/planner_analysis.cpp @@ -585,7 +585,7 @@ bool canUseSimpleSort(const QuerySolutionNode& solnRoot, // record ids along through the sorting process is wasted work when these ids will never be // consumed later in the execution of the query. If the record ids are needed, however, then // we can't use the simple sort stage. 
- !cq.getForceGenerateRecordId(); + !(plannerParams.options & QueryPlannerParams::PRESERVE_RECORD_ID); } boost::optional attemptToGetProjectionFromQuerySolution( diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp index b330ad4bedf..c0e787c6d99 100644 --- a/src/mongo/db/query/query_planner.cpp +++ b/src/mongo/db/query/query_planner.cpp @@ -463,6 +463,9 @@ string optionString(size_t options) { case QueryPlannerParams::STRICT_DISTINCT_ONLY: ss << "STRICT_DISTINCT_ONLY "; break; + case QueryPlannerParams::PRESERVE_RECORD_ID: + ss << "PRESERVE_RECORD_ID "; + break; case QueryPlannerParams::ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG: ss << "ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG "; break; diff --git a/src/mongo/db/query/query_planner_options_test.cpp b/src/mongo/db/query/query_planner_options_test.cpp index 7f4cb9e3a1b..7471af9e7a0 100644 --- a/src/mongo/db/query/query_planner_options_test.cpp +++ b/src/mongo/db/query/query_planner_options_test.cpp @@ -859,7 +859,7 @@ TEST_F(QueryPlannerTest, DollarResumeAfterFieldPropagatedFromQueryRequestToStage } TEST_F(QueryPlannerTest, PreserveRecordIdOptionPrecludesSimpleSort) { - forceRecordId = true; + params.options |= QueryPlannerParams::PRESERVE_RECORD_ID; runQueryAsCommand(fromjson("{find: 'testns', sort: {a:1}}")); diff --git a/src/mongo/db/query/query_planner_params.h b/src/mongo/db/query/query_planner_params.h index d055a1ef832..c826a92eb12 100644 --- a/src/mongo/db/query/query_planner_params.h +++ b/src/mongo/db/query/query_planner_params.h @@ -125,9 +125,15 @@ struct QueryPlannerParams { // declaration of getExecutorDistinct() for more detail. STRICT_DISTINCT_ONLY = 1 << 8, + // Instruct the planner that the caller is expecting to consume the record ids associated + // with documents returned by the plan. Any generated query solution must not discard record + // ids. In some cases, record ids can be discarded as an optimization when they will not be + // consumed downstream. 
+ PRESERVE_RECORD_ID = 1 << 9, + // Set this on an oplog scan to uassert that the oplog has not already rolled over the // minimum 'ts' timestamp specified in the query. - ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG = 1 << 9, + ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG = 1 << 10, // Instruct the plan enumerator to enumerate contained $ors in a special order. $or // enumeration can generate an exponential number of plans, and is therefore limited at some @@ -144,16 +150,16 @@ struct QueryPlannerParams { // order, we would get assignments [a_b, a_b], [a_c, a_c], [a_c, a_b], then [a_b, a_c]. This // is thought to be helpful in general, but particularly in cases where all children of the // $or use the same fields and have the same indexes available, as in this example. - ENUMERATE_OR_CHILDREN_LOCKSTEP = 1 << 10, + ENUMERATE_OR_CHILDREN_LOCKSTEP = 1 << 11, // Ensure that any plan generated returns data that is "owned." That is, all BSONObjs are // in an "owned" state and are not pointing to data that belongs to the storage engine. - RETURN_OWNED_DATA = 1 << 11, + RETURN_OWNED_DATA = 1 << 12, // When generating column scan queries, splits match expressions so that the filters can be // applied per-column. This is off by default, since the execution side doesn't support it // yet. - GENERATE_PER_COLUMN_FILTERS = 1 << 12, + GENERATE_PER_COLUMN_FILTERS = 1 << 13, }; // See Options enum above. 
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp index e31cc6ecd98..6db2512950e 100644 --- a/src/mongo/db/query/query_planner_test_fixture.cpp +++ b/src/mongo/db/query/query_planner_test_fixture.cpp @@ -368,7 +368,6 @@ void QueryPlannerTest::runQueryFull( ASSERT_OK(statusWithCQ.getStatus()); cq = std::move(statusWithCQ.getValue()); cq->setSbeCompatible(markQueriesSbeCompatible); - cq->setForceGenerateRecordId(forceRecordId); auto statusWithMultiPlanSolns = QueryPlanner::plan(*cq, params); ASSERT_OK(statusWithMultiPlanSolns.getStatus()); @@ -446,7 +445,6 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query, ASSERT_OK(statusWithCQ.getStatus()); cq = std::move(statusWithCQ.getValue()); cq->setSbeCompatible(markQueriesSbeCompatible); - cq->setForceGenerateRecordId(forceRecordId); auto statusWithMultiPlanSolns = QueryPlanner::plan(*cq, params); plannerStatus = statusWithMultiPlanSolns.getStatus(); @@ -476,7 +474,6 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) { ASSERT_OK(statusWithCQ.getStatus()); cq = std::move(statusWithCQ.getValue()); cq->setSbeCompatible(markQueriesSbeCompatible); - cq->setForceGenerateRecordId(forceRecordId); auto statusWithMultiPlanSolns = QueryPlanner::plan(*cq, params); ASSERT_OK(statusWithMultiPlanSolns.getStatus()); @@ -505,7 +502,6 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) { ASSERT_OK(statusWithCQ.getStatus()); cq = std::move(statusWithCQ.getValue()); cq->setSbeCompatible(markQueriesSbeCompatible); - cq->setForceGenerateRecordId(forceRecordId); auto statusWithMultiPlanSolns = QueryPlanner::plan(*cq, params); plannerStatus = statusWithMultiPlanSolns.getStatus(); diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h index 35cc0d0c294..44f51282d55 100644 --- a/src/mongo/db/query/query_planner_test_fixture.h +++ 
b/src/mongo/db/query/query_planner_test_fixture.h @@ -269,12 +269,7 @@ protected: std::vector> solns; bool relaxBoundsCheck = false; - // Value used for the sbeCompatible flag in the CanonicalQuery objects created by the - // test. bool markQueriesSbeCompatible = false; - // Value used for the forceGenerateRecordId flag in the CanonicalQuery objects created by the - // test. - bool forceRecordId = false; }; } // namespace mongo diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h index a24cff08f2f..a7182a4251d 100644 --- a/src/mongo/db/query/query_solution.h +++ b/src/mongo/db/query/query_solution.h @@ -1528,8 +1528,9 @@ struct EqLookupNode : public QuerySolutionNode { } const ProvidedSortSet& providedSorts() const final { - // Right now, we conservatively return kEmptySet. A future optimization could theoretically - // take the "joinField" into account when deciding whether this provides a sort or not. + // TODO SERVER-62815: The ProvidedSortSet will need to be computed here in order to allow + // sort optimization. The "joinField" field overwrites the field in the result outer + // document, this can affect the provided sort. For now, use conservative kEmptySet. 
return kEmptySet; } diff --git a/src/mongo/db/query/sbe_stage_builder.cpp b/src/mongo/db/query/sbe_stage_builder.cpp index e21b72d0c0c..ab20b24f502 100644 --- a/src/mongo/db/query/sbe_stage_builder.cpp +++ b/src/mongo/db/query/sbe_stage_builder.cpp @@ -473,6 +473,26 @@ SlotBasedStageBuilder::SlotBasedStageBuilder(OperationContext* opCtx, _data.shouldTrackResumeToken = csn->requestResumeToken; _data.shouldUseTailableScan = csn->tailable; } + + for (const auto& node : getAllNodesByType(solution.root(), STAGE_VIRTUAL_SCAN)) { + auto vsn = static_cast(node); + if (!vsn->hasRecordId) { + _shouldProduceRecordIdSlot = false; + break; + } + } + + const auto [lookupNode, lookupCount] = getFirstNodeByType(solution.root(), STAGE_EQ_LOOKUP); + if (lookupCount) { + // TODO: SERVER-63604 optimize _shouldProduceRecordIdSlot maintenance + _shouldProduceRecordIdSlot = false; + } + + const auto [groupNode, groupCount] = getFirstNodeByType(solution.root(), STAGE_GROUP); + if (groupCount) { + // TODO: SERVER-63604 optimize _shouldProduceRecordIdSlot maintenance + _shouldProduceRecordIdSlot = false; + } } std::unique_ptr SlotBasedStageBuilder::build(const QuerySolutionNode* root) { @@ -480,15 +500,11 @@ std::unique_ptr SlotBasedStageBuilder::build(const QuerySolution invariant(!_buildHasStarted); _buildHasStarted = true; - // We always produce a 'resultSlot'. + // We always produce a 'resultSlot' and conditionally produce a 'recordIdSlot' based on the + // 'shouldProduceRecordIdSlot'. PlanStageReqs reqs; reqs.set(kResult); - // We force the root stage to produce a 'recordId' if the iteration can be - // resumed (via a resume token or a tailable cursor) or if the caller simply expects to be able - // to read it. - reqs.setIf(kRecordId, - (_data.shouldUseTailableScan || _data.shouldTrackResumeToken || - _cq.getForceGenerateRecordId())); + reqs.setIf(kRecordId, _shouldProduceRecordIdSlot); // Set the target namespace to '_mainNss'. 
This is necessary as some QuerySolutionNodes that // require a collection when stage building do not explicitly name which collection they are @@ -498,10 +514,11 @@ std::unique_ptr SlotBasedStageBuilder::build(const QuerySolution // Build the SBE plan stage tree. auto [stage, outputs] = build(root, reqs); - // Assert that we produced a 'resultSlot' and that we produced a 'recordIdSlot' only if it was - // needed. + // Assert that we produced a 'resultSlot' and that we produced a 'recordIdSlot' if the + // 'shouldProduceRecordIdSlot' flag was set. Also assert that we produced an 'oplogTsSlot' if + // it's needed. invariant(outputs.has(kResult)); - invariant(reqs.has(kRecordId) == outputs.has(kRecordId)); + invariant(!_shouldProduceRecordIdSlot || outputs.has(kRecordId)); _data.outputs = std::move(outputs); @@ -527,10 +544,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder outputs.get(kReturnKey), sbe::makeE("newObj", sbe::makeEs())); } - // Don't advertize the RecordId output if none of our ancestors are going to use it. - if (!reqs.has(kRecordId)) { - outputs.clear(kRecordId); - } return {std::move(stage), std::move(outputs)}; } @@ -646,10 +659,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder iamMap, reqs.has(kIndexKeyPattern)); - // Remove the RecordId from the output if we were not requested to produce it. - if (!reqs.has(PlanStageSlots::kRecordId) && outputs.has(kRecordId)) { - outputs.clear(kRecordId); - } if (reqs.has(PlanStageSlots::kReturnKey)) { sbe::EExpression::Vector mkObjArgs; @@ -975,15 +984,10 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder _slotIdGenerator); outputs.set(kResult, fetchResultSlot); - // Propagate the RecordId output only if requested. 
- if (reqs.has(kRecordId)) { - outputs.set(kRecordId, fetchRecordIdSlot); - } else { - outputs.clear(kRecordId); - } + outputs.set(kRecordId, fetchRecordIdSlot); if (fn->filter) { - forwardingReqs = reqs.copy().set(kResult); + forwardingReqs = reqs.copy().set(kResult).set(kRecordId); relevantSlots = sbe::makeSV(); outputs.forEachSlot(forwardingReqs, [&](auto&& slot) { relevantSlots.push_back(slot); }); @@ -1672,10 +1676,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder if (mergeSortNode->dedup) { stage = sbe::makeS( std::move(stage), sbe::makeSV(outputs.get(kRecordId)), root->nodeId()); - // Stop propagating the RecordId output if none of our ancestors are going to use it. - if (!reqs.has(kRecordId)) { - outputs.clear(kRecordId); - } } return {std::move(stage), std::move(outputs)}; @@ -1894,10 +1894,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder if (orn->dedup) { stage = sbe::makeS( std::move(stage), sbe::makeSV(outputs.get(kRecordId)), root->nodeId()); - // Stop propagating the RecordId output if none of our ancestors are going to use it. - if (!reqs.has(kRecordId)) { - outputs.clear(kRecordId); - } } if (orn->filter) { @@ -2090,10 +2086,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder collatorSlot, root->nodeId()); } - // Stop propagating the RecordId output if none of our ancestors are going to use it. - if (!reqs.has(kRecordId)) { - outputs.clear(kRecordId); - } return {std::move(hashJoinStage), std::move(outputs)}; } @@ -2203,10 +2195,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder sortDirs, root->nodeId()); } - // Stop propagating the RecordId output if none of our ancestors are going to use it. 
- if (!reqs.has(kRecordId)) { - outputs.clear(kRecordId); - } return {std::move(mergeJoinStage), std::move(outputs)}; } @@ -2631,10 +2619,6 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder tassert( 5851600, "should have one and only one child for GROUP", groupNode->children.size() == 1); tassert(5851601, "GROUP should have had group-by key expression", idExpr); - tassert( - 6360401, - "GROUP cannot propagate a record id slot, but the record id was requested by the parent", - !reqs.has(kRecordId)); const auto& childNode = groupNode->children[0].get(); const auto& accStmts = groupNode->accumulators; @@ -2650,6 +2634,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder // Builds the child and gets the child result slot. auto [childStage, childOutputs] = build(childNode, childReqs); + _shouldProduceRecordIdSlot = false; tassert(6075900, "Expected no optimized expressions but got: {}"_format(_state.preGeneratedExprs.size()), diff --git a/src/mongo/db/query/sbe_stage_builder.h b/src/mongo/db/query/sbe_stage_builder.h index 1507069fe90..ab928f14329 100644 --- a/src/mongo/db/query/sbe_stage_builder.h +++ b/src/mongo/db/query/sbe_stage_builder.h @@ -541,6 +541,7 @@ private: PlanStageData _data; bool _buildHasStarted{false}; + bool _shouldProduceRecordIdSlot{true}; // Common parameters to SBE stage builder functions. 
StageBuilderState _state; diff --git a/src/mongo/db/query/sbe_stage_builder_expression.cpp b/src/mongo/db/query/sbe_stage_builder_expression.cpp index 5f612f65840..2edf241f48a 100644 --- a/src/mongo/db/query/sbe_stage_builder_expression.cpp +++ b/src/mongo/db/query/sbe_stage_builder_expression.cpp @@ -42,7 +42,6 @@ #include "mongo/db/exec/sbe/stages/project.h" #include "mongo/db/exec/sbe/stages/traverse.h" #include "mongo/db/exec/sbe/stages/union.h" -#include "mongo/db/exec/sbe/values/arith_common.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/accumulator.h" @@ -1750,6 +1749,19 @@ public: unsupportedExpression("$dateFromString"); } void visit(const ExpressionDateTrunc* expr) final { + auto frameId = _context->state.frameId(); + sbe::EExpression::Vector arguments; + sbe::EExpression::Vector bindings; + sbe::EVariable dateRef(frameId, 0); + sbe::EVariable unitRef(frameId, 1); + sbe::EVariable binSizeRef(frameId, 2); + sbe::EVariable timezoneRef(frameId, 3); + sbe::EVariable startOfWeekRef(frameId, 4); + + // An auxiliary boolean variable to hold a value of a common subexpression 'unit'=="week" + // (string). + sbe::EVariable unitIsWeekRef(frameId, 5); + auto children = expr->getChildren(); invariant(children.size() == 5); _context->ensureArity(2 + (expr->isBinSizeSpecified() ? 1 : 0) + @@ -1757,8 +1769,7 @@ public: (expr->isStartOfWeekSpecified() ? 1 : 0)); // Get child expressions. - auto startOfWeekExpression = - expr->isStartOfWeekSpecified() ? _context->popExpr() : makeConstant("sun"_sd); + auto startOfWeekExpression = expr->isStartOfWeekSpecified() ? _context->popExpr() : nullptr; auto timezoneExpression = expr->isTimezoneSpecified() ? 
_context->popExpr() : makeConstant("UTC"_sd); auto binSizeExpression = expr->isBinSizeSpecified() @@ -1768,186 +1779,109 @@ public: auto dateExpression = _context->popExpr(); auto timezoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); - auto [timezoneDBTag, timezoneDBVal] = - _context->state.data->env->getAccessor(timezoneDBSlot)->getViewOfValue(); - tassert(7003901, - "$dateTrunc first argument must be a timezoneDB object", - timezoneDBTag == sbe::value::TypeTags::timeZoneDB); - auto timezoneDB = sbe::value::getTimeZoneDBView(timezoneDBVal); - - // Local bind to hold the date expression result - auto dateFrameId = _context->state.frameId(); - sbe::EExpression::Vector dateBindings; - sbe::EVariable dateRef(dateFrameId, 0); - dateBindings.push_back(std::move(dateExpression)); // Set parameters for an invocation of built-in "dateTrunc" function. - sbe::EExpression::Vector arguments; arguments.push_back(makeVariable(timezoneDBSlot)); arguments.push_back(dateRef.clone()); - arguments.push_back(unitExpression->clone()); - arguments.push_back(binSizeExpression->clone()); - arguments.push_back(timezoneExpression->clone()); - arguments.push_back(startOfWeekExpression->clone()); + arguments.push_back(unitRef.clone()); + arguments.push_back(binSizeRef.clone()); + arguments.push_back(timezoneRef.clone()); + if (expr->isStartOfWeekSpecified()) { + // Parameter "startOfWeek" - if the time unit is the week, then pass value of parameter + // "startOfWeek" of "$dateTrunc" expression, otherwise pass a valid default value, since + // "dateTrunc" built-in function does not accept non-string type values for this + // parameter. + arguments.push_back(sbe::makeE( + unitIsWeekRef.clone(), startOfWeekRef.clone(), makeConstant("sun"_sd))); + } + + // Set bindings for the frame. 
+ bindings.push_back(std::move(dateExpression)); + bindings.push_back(std::move(unitExpression)); + bindings.push_back(std::move(binSizeExpression)); + bindings.push_back(std::move(timezoneExpression)); + if (expr->isStartOfWeekSpecified()) { + bindings.push_back(std::move(startOfWeekExpression)); + bindings.push_back(generateIsEqualToStringCheck(unitRef, "week"_sd)); + } // Create an expression to invoke built-in "dateTrunc" function. auto dateTruncFunctionCall = sbe::makeE("dateTrunc"_sd, std::move(arguments)); - // Local bind to hold the $dateTrunc function call result - auto dateTruncFrameId = _context->state.frameId(); - sbe::EExpression::Vector dateTruncBindings; - sbe::EVariable dateTruncRef(dateTruncFrameId, 0); - dateTruncBindings.push_back(std::move(dateTruncFunctionCall)); - - // Local bind to hold the unitIsWeek common subexpression - auto unitIsWeekFrameId = _context->state.frameId(); - sbe::EExpression::Vector unitIsWeekBindings; - sbe::EVariable unitIsWeekRef(unitIsWeekFrameId, 0); - unitIsWeekBindings.push_back(generateIsEqualToStringCheck(*unitExpression, "week"_sd)); - // Create expressions to check that each argument to "dateTrunc" function exists, is not // null, and is of the correct type. std::vector inputValidationCases; // Return null if any of the parameters is either null or missing. 
inputValidationCases.push_back(generateReturnNullIfNullOrMissing(dateRef)); - inputValidationCases.push_back(generateReturnNullIfNullOrMissing(unitExpression->clone())); - inputValidationCases.push_back( - generateReturnNullIfNullOrMissing(binSizeExpression->clone())); - inputValidationCases.push_back( - generateReturnNullIfNullOrMissing(timezoneExpression->clone())); - inputValidationCases.emplace_back( - makeBinaryOp(sbe::EPrimBinary::logicAnd, - unitIsWeekRef.clone(), - generateNullOrMissing(startOfWeekExpression->clone())), - makeConstant(sbe::value::TypeTags::Null, 0)); + inputValidationCases.push_back(generateReturnNullIfNullOrMissing(unitRef)); + inputValidationCases.push_back(generateReturnNullIfNullOrMissing(binSizeRef)); + inputValidationCases.push_back(generateReturnNullIfNullOrMissing(timezoneRef)); + if (expr->isStartOfWeekSpecified()) { + inputValidationCases.emplace_back(makeBinaryOp(sbe::EPrimBinary::logicAnd, + unitIsWeekRef.clone(), + generateNullOrMissing(startOfWeekRef)), + makeConstant(sbe::value::TypeTags::Null, 0)); + } // "timezone" parameter validation. 
- if (timezoneExpression->as()) { - auto [timezoneTag, timezoneVal] = - timezoneExpression->as()->getConstant(); - tassert(7003907, - "$dateTrunc parameter 'timezone' must be a string", - sbe::value::isString(timezoneTag)); - tassert(7003908, - "$dateTrunc parameter 'timezone' must be a valid timezone", - sbe::vm::isValidTimezone(timezoneTag, timezoneVal, timezoneDB)); - } else { - inputValidationCases.emplace_back( - generateNonStringCheck(*timezoneExpression), - makeFail(5439100, "$dateTrunc parameter 'timezone' must be a string")); - inputValidationCases.emplace_back( - makeNot(makeFunction( - "isTimezone", makeVariable(timezoneDBSlot), timezoneExpression->clone())), - makeFail(5439101, "$dateTrunc parameter 'timezone' must be a valid timezone")); - } + inputValidationCases.emplace_back( + generateNonStringCheck(timezoneRef), + makeFail(5439100, "$dateTrunc parameter 'timezone' must be a string")); + inputValidationCases.emplace_back( + makeNot(makeFunction("isTimezone", makeVariable(timezoneDBSlot), timezoneRef.clone())), + makeFail(5439101, "$dateTrunc parameter 'timezone' must be a valid timezone")); // "date" parameter validation. inputValidationCases.emplace_back(generateFailIfNotCoercibleToDate( dateRef, ErrorCodes::Error{5439102}, "$dateTrunc"_sd, "date"_sd)); // "unit" parameter validation. 
- if (unitExpression->as()) { - auto [unitTag, unitVal] = unitExpression->as()->getConstant(); - tassert(7003902, - "$dateTrunc parameter 'unit' must be a string", - sbe::value::isString(unitTag)); - auto unitString = sbe::value::getStringView(unitTag, unitVal); - tassert(7003903, - "$dateTrunc parameter 'unit' must be a valid time unit", - isValidTimeUnit(unitString)); - } else { - inputValidationCases.emplace_back( - generateNonStringCheck(*unitExpression), - makeFail(5439103, "$dateTrunc parameter 'unit' must be a string")); - inputValidationCases.emplace_back( - makeNot(makeFunction("isTimeUnit", unitExpression->clone())), - makeFail(5439104, "$dateTrunc parameter 'unit' must be a valid time unit")); - } + inputValidationCases.emplace_back( + generateNonStringCheck(unitRef), + makeFail(5439103, "$dateTrunc parameter 'unit' must be a string")); + inputValidationCases.emplace_back( + makeNot(makeFunction("isTimeUnit", unitRef.clone())), + makeFail(5439104, "$dateTrunc parameter 'unit' must be a valid time unit")); // "binSize" parameter validation. 
if (expr->isBinSizeSpecified()) { - if (binSizeExpression->as()) { - auto [binSizeTag, binSizeValue] = - binSizeExpression->as()->getConstant(); - tassert( - 7003904, - "$dateTrunc parameter 'binSize' must be coercible to a positive 64-bit integer", - sbe::value::isNumber(binSizeTag)); - auto [binSizeLongOwn, binSizeLongTag, binSizeLongValue] = - sbe::value::genericNumConvert( - binSizeTag, binSizeValue, sbe::value::TypeTags::NumberInt64); - tassert( - 7003905, - "$dateTrunc parameter 'binSize' must be coercible to a positive 64-bit integer", - binSizeLongTag != sbe::value::TypeTags::Nothing); - auto binSize = sbe::value::bitcastTo(binSizeLongValue); - tassert( - 7003906, - "$dateTrunc parameter 'binSize' must be coercible to a positive 64-bit integer", - binSize > 0); - } else { - inputValidationCases.emplace_back( - makeNot(makeBinaryOp( + inputValidationCases.emplace_back( + makeNot(makeBinaryOp( + sbe::EPrimBinary::logicAnd, + makeBinaryOp( sbe::EPrimBinary::logicAnd, - makeBinaryOp(sbe::EPrimBinary::logicAnd, - makeFunction("isNumber", binSizeExpression->clone()), - makeFunction("exists", - sbe::makeE( - binSizeExpression->clone(), - sbe::value::TypeTags::NumberInt64))), - generatePositiveCheck(*binSizeExpression))), - makeFail( - 5439105, - "$dateTrunc parameter 'binSize' must be coercible to a positive 64-bit " - "integer")); - } + makeFunction("isNumber", binSizeRef.clone()), + makeFunction("exists", + sbe::makeE( + binSizeRef.clone(), sbe::value::TypeTags::NumberInt64))), + generatePositiveCheck(binSizeRef))), + makeFail(5439105, + "$dateTrunc parameter 'binSize' must be coercible to a positive 64-bit " + "integer")); } // "startOfWeek" parameter validation. 
if (expr->isStartOfWeekSpecified()) { - if (startOfWeekExpression->as()) { - auto [startOfWeekTag, startOfWeekVal] = - startOfWeekExpression->as()->getConstant(); - tassert(7003909, - "$dateTrunc parameter 'startOfWeek' must be a string", - sbe::value::isString(startOfWeekTag)); - auto startOfWeekString = sbe::value::getStringView(startOfWeekTag, startOfWeekVal); - tassert(7003910, - "$dateTrunc parameter 'startOfWeek' must be a valid day of the week", - isValidDayOfWeek(startOfWeekString)); - } else { - // If 'timeUnit' value is equal to "week" then validate "startOfWeek" parameter. - inputValidationCases.emplace_back( - makeBinaryOp(sbe::EPrimBinary::logicAnd, - unitIsWeekRef.clone(), - generateNonStringCheck(*startOfWeekExpression)), - makeFail(5439106, "$dateTrunc parameter 'startOfWeek' must be a string")); - inputValidationCases.emplace_back( - makeBinaryOp( - sbe::EPrimBinary::logicAnd, - unitIsWeekRef.clone(), - makeNot(makeFunction("isDayOfWeek", startOfWeekExpression->clone()))), - makeFail(5439107, - "$dateTrunc parameter 'startOfWeek' must be a valid day of the week")); - } + // If 'timeUnit' value is equal to "week" then validate "startOfWeek" parameter. 
+ inputValidationCases.emplace_back( + makeBinaryOp(sbe::EPrimBinary::logicAnd, + unitIsWeekRef.clone(), + generateNonStringCheck(startOfWeekRef)), + makeFail(5439106, "$dateTrunc parameter 'startOfWeek' must be a string")); + inputValidationCases.emplace_back( + makeBinaryOp(sbe::EPrimBinary::logicAnd, + unitIsWeekRef.clone(), + makeNot(makeFunction("isDayOfWeek", startOfWeekRef.clone()))), + makeFail(5439107, + "$dateTrunc parameter 'startOfWeek' must be a valid day of the week")); } + auto dateTruncExpression = buildMultiBranchConditionalFromCaseValuePairs( + std::move(inputValidationCases), std::move(dateTruncFunctionCall)); _context->pushExpr(sbe::makeE( - dateFrameId, - std::move(dateBindings), - sbe::makeE( - dateTruncFrameId, - std::move(dateTruncBindings), - sbe::makeE(makeFunction("exists", dateTruncRef.clone()), - dateTruncRef.clone(), - sbe::makeE( - unitIsWeekFrameId, - std::move(unitIsWeekBindings), - buildMultiBranchConditionalFromCaseValuePairs( - std::move(inputValidationCases), - makeConstant(sbe::value::TypeTags::Nothing, 0))))))); + frameId, std::move(bindings), std::move(dateTruncExpression))); } void visit(const ExpressionDivide* expr) final { _context->ensureArity(2); @@ -2975,43 +2909,7 @@ public: unsupportedExpression(expr->getOpName()); } void visit(const ExpressionSubtract* expr) final { - invariant(expr->getChildren().size() == 2); - _context->ensureArity(2); - - auto rhs = _context->popExpr(); - auto lhs = _context->popExpr(); - - auto frameId = _context->state.frameId(); - auto binds = sbe::makeEs(std::move(lhs), std::move(rhs)); - sbe::EVariable lhsRef{frameId, 0}; - sbe::EVariable rhsRef{frameId, 1}; - - auto checkNullArguments = makeBinaryOp(sbe::EPrimBinary::logicOr, - generateNullOrMissing(lhsRef.clone()), - generateNullOrMissing(rhsRef.clone())); - - auto checkArgumentTypes = makeNot(sbe::makeE( - makeFunction("isNumber", lhsRef.clone()), - makeFunction("isNumber", rhsRef.clone()), - makeBinaryOp(sbe::EPrimBinary::logicAnd, - 
makeFunction("isDate", lhsRef.clone()), - makeBinaryOp(sbe::EPrimBinary::logicOr, - makeFunction("isNumber", rhsRef.clone()), - makeFunction("isDate", rhsRef.clone()))))); - - auto subtractOp = makeBinaryOp(sbe::EPrimBinary::sub, lhsRef.clone(), rhsRef.clone()); - auto subtractExpr = buildMultiBranchConditional( - CaseValuePair{std::move(checkNullArguments), - makeConstant(sbe::value::TypeTags::Null, 0)}, - CaseValuePair{ - std::move(checkArgumentTypes), - makeFail(5156200, - "Only numbers and dates are allowed in an $subtract expression. To " - "subtract a number from a date, the date must be the first argument.")}, - std::move(subtractOp)); - - _context->pushExpr( - sbe::makeE(frameId, std::move(binds), std::move(subtractExpr))); + unsupportedExpression(expr->getOpName()); } void visit(const ExpressionSwitch* expr) final { visitConditionalExpression(expr); @@ -3484,20 +3382,16 @@ private: return {generateNullOrMissing(variable), makeConstant(sbe::value::TypeTags::Null, 0)}; } - static CaseValuePair generateReturnNullIfNullOrMissing(std::unique_ptr expr) { - return {generateNullOrMissing(std::move(expr)), - makeConstant(sbe::value::TypeTags::Null, 0)}; - } - /** * Creates a boolean expression to check if 'variable' is equal to string 'string'. 
*/ static std::unique_ptr generateIsEqualToStringCheck( - const sbe::EExpression& expr, StringData string) { - return sbe::makeE( - sbe::EPrimBinary::logicAnd, - makeFunction("isString", expr.clone()), - sbe::makeE(sbe::EPrimBinary::eq, expr.clone(), makeConstant(string))); + const sbe::EVariable& variable, StringData string) { + return sbe::makeE(sbe::EPrimBinary::logicAnd, + makeFunction("isString", variable.clone()), + sbe::makeE(sbe::EPrimBinary::eq, + variable.clone(), + makeConstant(string))); } /** diff --git a/src/mongo/db/query/sbe_stage_builder_helpers.cpp b/src/mongo/db/query/sbe_stage_builder_helpers.cpp index b6da4d12a04..e9672bce394 100644 --- a/src/mongo/db/query/sbe_stage_builder_helpers.cpp +++ b/src/mongo/db/query/sbe_stage_builder_helpers.cpp @@ -113,20 +113,16 @@ std::unique_ptr makeIsMember(std::unique_ptr return makeIsMember(std::move(input), std::move(arr), std::move(collatorVar)); } -std::unique_ptr generateNullOrMissingExpr(const sbe::EExpression& expr) { - return makeFunction("fillEmpty", + +std::unique_ptr generateNullOrMissing(const sbe::EVariable& var) { + return makeBinaryOp(sbe::EPrimBinary::logicOr, + makeNot(makeFunction("exists", var.clone())), makeFunction("typeMatch", - expr.clone(), + var.clone(), makeConstant(sbe::value::TypeTags::NumberInt64, sbe::value::bitcastFrom( getBSONTypeMask(BSONType::jstNULL) | - getBSONTypeMask(BSONType::Undefined)))), - sbe::makeE(sbe::value::TypeTags::Boolean, - sbe::value::bitcastFrom(true))); -} - -std::unique_ptr generateNullOrMissing(const sbe::EVariable& var) { - return generateNullOrMissingExpr(var); + getBSONTypeMask(BSONType::Undefined))))); } std::unique_ptr generateNullOrMissing(const sbe::FrameId frameId, @@ -136,7 +132,14 @@ std::unique_ptr generateNullOrMissing(const sbe::FrameId frame } std::unique_ptr generateNullOrMissing(std::unique_ptr arg) { - return generateNullOrMissingExpr(*arg); + return makeBinaryOp(sbe::EPrimBinary::logicOr, + makeNot(makeFunction("exists", arg->clone())), 
+ makeFunction("typeMatch", + arg->clone(), + makeConstant(sbe::value::TypeTags::NumberInt64, + sbe::value::bitcastFrom( + getBSONTypeMask(BSONType::jstNULL) | + getBSONTypeMask(BSONType::Undefined))))); } std::unique_ptr generateNonNumericCheck(const sbe::EVariable& var) { @@ -173,9 +176,9 @@ std::unique_ptr generateNonPositiveCheck(const sbe::EVariable& sbe::value::bitcastFrom(0))); } -std::unique_ptr generatePositiveCheck(const sbe::EExpression& expr) { +std::unique_ptr generatePositiveCheck(const sbe::EVariable& var) { return makeBinaryOp(sbe::EPrimBinary::EPrimBinary::greater, - expr.clone(), + var.clone(), sbe::makeE(sbe::value::TypeTags::NumberInt32, sbe::value::bitcastFrom(0))); } @@ -191,8 +194,8 @@ std::unique_ptr generateNonObjectCheck(const sbe::EVariable& v return makeNot(makeFunction("isObject", var.clone())); } -std::unique_ptr generateNonStringCheck(const sbe::EExpression& expr) { - return makeNot(makeFunction("isString", expr.clone())); +std::unique_ptr generateNonStringCheck(const sbe::EVariable& var) { + return makeNot(makeFunction("isString", var.clone())); } std::unique_ptr generateNullishOrNotRepresentableInt32Check( @@ -568,28 +571,6 @@ EvalExprStagePair generateShortCircuitingLogicalOp(sbe::EPrimBinary::Op logicOp, return std::move(branches[0]); } - bool exprOnlyBranches = true; - for (const auto& [expr, stage] : branches) { - if (!stage.stageIsNull()) { - exprOnlyBranches = false; - break; - } - } - - if (exprOnlyBranches) { - std::unique_ptr exprOnlyOp; - for (int32_t i = branches.size() - 1; i >= 0; i--) { - auto& [expr, _] = branches[i]; - auto stateExpr = stateHelper.getBool(expr.extractExpr()); - if (exprOnlyOp) { - exprOnlyOp = makeBinaryOp(logicOp, std::move(stateExpr), std::move(exprOnlyOp)); - } else { - exprOnlyOp = std::move(stateExpr); - } - } - return {EvalExpr{std::move(exprOnlyOp)}, EvalStage{}}; - } - // Prepare to create limit-1/union with N branches (where N is the number of operands). 
Each // branch will be evaluated from left to right until one of the branches produces a value. The // first N-1 branches have a FilterStage to control whether they produce a value. If a branch's diff --git a/src/mongo/db/query/sbe_stage_builder_helpers.h b/src/mongo/db/query/sbe_stage_builder_helpers.h index 3225859bd9a..a9e78644419 100644 --- a/src/mongo/db/query/sbe_stage_builder_helpers.h +++ b/src/mongo/db/query/sbe_stage_builder_helpers.h @@ -114,7 +114,7 @@ std::unique_ptr generateNonPositiveCheck(const sbe::EVariable& * Generates an EExpression that checks if the input expression is a positive number (i.e. > 0) * _assuming that_ it has already been verified to be numeric. */ -std::unique_ptr generatePositiveCheck(const sbe::EExpression& expr); +std::unique_ptr generatePositiveCheck(const sbe::EVariable& var); /** * Generates an EExpression that checks if the input expression is a negative (i.e., < 0) number @@ -132,7 +132,7 @@ std::unique_ptr generateNonObjectCheck(const sbe::EVariable& v * Generates an EExpression that checks if the input expression is not a string, _assuming that * it has already been verified to be neither null nor missing. */ -std::unique_ptr generateNonStringCheck(const sbe::EExpression& expr); +std::unique_ptr generateNonStringCheck(const sbe::EVariable& var); /** * Generates an EExpression that checks whether the input expression is null, missing, or diff --git a/src/mongo/db/query/sbe_stage_builder_lookup.cpp b/src/mongo/db/query/sbe_stage_builder_lookup.cpp index aa4ae7fbde3..3b35fb71ef6 100644 --- a/src/mongo/db/query/sbe_stage_builder_lookup.cpp +++ b/src/mongo/db/query/sbe_stage_builder_lookup.cpp @@ -1063,6 +1063,9 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder _state.data->foreignHashJoinCollections.emplace(eqLookupNode->foreignCollection); } + // $lookup creates its own output documents. 
+ _shouldProduceRecordIdSlot = false; + auto localReqs = reqs.copy().set(kResult); auto [localStage, localOutputs] = build(eqLookupNode->children[0].get(), localReqs); SlotId localDocumentSlot = localOutputs.get(PlanStageSlots::kResult); diff --git a/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp b/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp index 91042b7b096..c6b0525dea9 100644 --- a/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp +++ b/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp @@ -60,10 +60,6 @@ SbeStageBuilderTestFixture::buildPlanStage( auto statusWithCQ = CanonicalQuery::canonicalize(operationContext(), std::move(findCommand), false, expCtx); ASSERT_OK(statusWithCQ.getStatus()); - if (hasRecordId) { - // Force the builder to generate the RecordId output even if it isn't needed by the plan. - statusWithCQ.getValue()->setForceGenerateRecordId(true); - } CollectionMock coll(_nss); CollectionPtr collPtr(&coll); diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript index f4c2d2d9447..54bb5718fa1 100644 --- a/src/mongo/db/repl/SConscript +++ b/src/mongo/db/repl/SConscript @@ -548,7 +548,6 @@ env.Library( '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/db/s/sharding_runtime_d', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/serverless/serverless_lock', '$BUILD_DIR/mongo/db/session/kill_sessions_local', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/db/storage/historical_ident_tracker', @@ -764,7 +763,6 @@ env.Library( '$BUILD_DIR/mongo/db/commands/test_commands_enabled', '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/serverless/serverless_lock', '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/storage/journal_flusher', 'delayable_timeout_callback', @@ -1247,7 +1245,6 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', 
'$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/serverless/serverless_lock', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/executor/scoped_task_executor', 'repl_server_parameters', @@ -1416,7 +1413,6 @@ env.Library( '$BUILD_DIR/mongo/db/multitenancy', '$BUILD_DIR/mongo/db/ops/write_ops_exec', '$BUILD_DIR/mongo/db/pipeline/process_interface/mongo_process_interface', - '$BUILD_DIR/mongo/db/serverless/serverless_lock', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/db/storage/wiredtiger/storage_wiredtiger_import', '$BUILD_DIR/mongo/db/transaction/transaction', @@ -1489,7 +1485,6 @@ env.Library( "$BUILD_DIR/mongo/db/catalog/local_oplog_info", "$BUILD_DIR/mongo/db/concurrency/exception_util", "$BUILD_DIR/mongo/db/index_builds_coordinator_interface", - "$BUILD_DIR/mongo/db/serverless/serverless_lock", ], ) diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp index 7e311a0b7a4..d405146eb37 100644 --- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp +++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp @@ -53,16 +53,17 @@ namespace repl { CollectionBulkLoaderImpl::CollectionBulkLoaderImpl(ServiceContext::UniqueClient&& client, ServiceContext::UniqueOperationContext&& opCtx, - const NamespaceString& nss, + std::unique_ptr&& autoColl, const BSONObj& idIndexSpec) : _client{std::move(client)}, _opCtx{std::move(opCtx)}, - _nss{nss}, + _collection{std::move(autoColl)}, + _nss{_collection->getCollection()->ns()}, _idIndexBlock(std::make_unique()), _secondaryIndexesBlock(std::make_unique()), _idIndexSpec(idIndexSpec.getOwned()) { invariant(_opCtx); - invariant(!_nss.isEmpty()); + invariant(_collection); } CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() { @@ -71,8 +72,7 @@ CollectionBulkLoaderImpl::~CollectionBulkLoaderImpl() { } Status CollectionBulkLoaderImpl::init(const std::vector& secondaryIndexSpecs) { - return 
_runTaskReleaseResourcesOnFailure([&]() -> Status { - AutoGetCollection coll(_opCtx.get(), _nss, MODE_X); + return _runTaskReleaseResourcesOnFailure([&secondaryIndexSpecs, this]() -> Status { // This method is called during initial sync of a replica set member, so we can safely tell // the index builders to build in the foreground instead of using the hybrid approach. The // member won't be available to be queried by anyone until it's caught up with the primary. @@ -80,52 +80,57 @@ Status CollectionBulkLoaderImpl::init(const std::vector& secondaryIndex // locks as yielding a MODE_X/MODE_S lock isn't allowed. _secondaryIndexesBlock->setIndexBuildMethod(IndexBuildMethod::kForeground); _idIndexBlock->setIndexBuildMethod(IndexBuildMethod::kForeground); - return writeConflictRetry(_opCtx.get(), "CollectionBulkLoader::init", _nss.ns(), [&] { - WriteUnitOfWork wuow(_opCtx.get()); - // All writes in CollectionBulkLoaderImpl should be unreplicated. - // The opCtx is accessed indirectly through _secondaryIndexesBlock. - UnreplicatedWritesBlock uwb(_opCtx.get()); - // This enforces the buildIndexes setting in the replica set configuration. - CollectionWriter collWriter(_opCtx.get(), coll); - auto indexCatalog = collWriter.getWritableCollection(_opCtx.get())->getIndexCatalog(); - auto specs = indexCatalog->removeExistingIndexesNoChecks( - _opCtx.get(), collWriter.get(), secondaryIndexSpecs); - if (specs.size()) { - _secondaryIndexesBlock->ignoreUniqueConstraint(); - auto status = _secondaryIndexesBlock - ->init(_opCtx.get(), - collWriter, - specs, - MultiIndexBlock::kNoopOnInitFn, - /*forRecovery=*/false) - .getStatus(); - if (!status.isOK()) { - return status; + return writeConflictRetry( + _opCtx.get(), + "CollectionBulkLoader::init", + _collection->getNss().ns(), + [&secondaryIndexSpecs, this] { + WriteUnitOfWork wuow(_opCtx.get()); + // All writes in CollectionBulkLoaderImpl should be unreplicated. + // The opCtx is accessed indirectly through _secondaryIndexesBlock. 
+ UnreplicatedWritesBlock uwb(_opCtx.get()); + // This enforces the buildIndexes setting in the replica set configuration. + CollectionWriter collWriter(_opCtx.get(), *_collection); + auto indexCatalog = + collWriter.getWritableCollection(_opCtx.get())->getIndexCatalog(); + auto specs = indexCatalog->removeExistingIndexesNoChecks( + _opCtx.get(), collWriter.get(), secondaryIndexSpecs); + if (specs.size()) { + _secondaryIndexesBlock->ignoreUniqueConstraint(); + auto status = _secondaryIndexesBlock + ->init(_opCtx.get(), + collWriter, + specs, + MultiIndexBlock::kNoopOnInitFn, + /*forRecovery=*/false) + .getStatus(); + if (!status.isOK()) { + return status; + } + } else { + _secondaryIndexesBlock.reset(); } - } else { - _secondaryIndexesBlock.reset(); - } - if (!_idIndexSpec.isEmpty()) { - auto status = - _idIndexBlock - ->init( - _opCtx.get(), collWriter, _idIndexSpec, MultiIndexBlock::kNoopOnInitFn) - .getStatus(); - if (!status.isOK()) { - return status; + if (!_idIndexSpec.isEmpty()) { + auto status = _idIndexBlock + ->init(_opCtx.get(), + collWriter, + _idIndexSpec, + MultiIndexBlock::kNoopOnInitFn) + .getStatus(); + if (!status.isOK()) { + return status; + } + } else { + _idIndexBlock.reset(); } - } else { - _idIndexBlock.reset(); - } - wuow.commit(); - return Status::OK(); - }); + wuow.commit(); + return Status::OK(); + }); }); } Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( - const CollectionPtr& coll, const std::vector::const_iterator begin, const std::vector::const_iterator end) { auto iter = begin; @@ -148,7 +153,7 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( bytesInBlock += doc.objsize(); // This version of insert will not update any indexes. 
const auto status = collection_internal::insertDocumentForBulkLoader( - _opCtx.get(), coll, doc, onRecordInserted); + _opCtx.get(), **_collection, doc, onRecordInserted); if (!status.isOK()) { return status; } @@ -168,7 +173,7 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( status = writeConflictRetry(_opCtx.get(), "_addDocumentToIndexBlocks", _nss.ns(), [&] { WriteUnitOfWork wunit(_opCtx.get()); for (size_t index = 0; index < locs.size(); ++index) { - status = _addDocumentToIndexBlocks(coll, *iter++, locs.at(index)); + status = _addDocumentToIndexBlocks(*iter++, locs.at(index)); if (!status.isOK()) { return status; } @@ -185,7 +190,6 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( } Status CollectionBulkLoaderImpl::_insertDocumentsForCappedCollection( - const CollectionPtr& coll, const std::vector::const_iterator begin, const std::vector::const_iterator end) { for (auto iter = begin; iter != end; ++iter) { @@ -196,7 +200,7 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForCappedCollection( // For capped collections, we use regular insertDocument, which // will update pre-existing indexes. 
const auto status = collection_internal::insertDocument( - _opCtx.get(), coll, InsertStatement(doc), nullptr); + _opCtx.get(), **_collection, InsertStatement(doc), nullptr); if (!status.isOK()) { return status; } @@ -213,20 +217,17 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForCappedCollection( Status CollectionBulkLoaderImpl::insertDocuments(const std::vector::const_iterator begin, const std::vector::const_iterator end) { return _runTaskReleaseResourcesOnFailure([&] { - AutoGetCollection coll( - _opCtx.get(), _nss, fixLockModeForSystemDotViewsChanges(_nss, MODE_IX)); UnreplicatedWritesBlock uwb(_opCtx.get()); if (_idIndexBlock || _secondaryIndexesBlock) { - return _insertDocumentsForUncappedCollection(*coll, begin, end); + return _insertDocumentsForUncappedCollection(begin, end); } else { - return _insertDocumentsForCappedCollection(*coll, begin, end); + return _insertDocumentsForCappedCollection(begin, end); } }); } Status CollectionBulkLoaderImpl::commit() { return _runTaskReleaseResourcesOnFailure([&] { - AutoGetCollection coll(_opCtx.get(), _nss, MODE_X); _stats.startBuildingIndexes = Date_t::now(); LOGV2_DEBUG(21130, 2, @@ -238,23 +239,25 @@ Status CollectionBulkLoaderImpl::commit() { // Commit before deleting dups, so the dups will be removed from secondary indexes when // deleted. if (_secondaryIndexesBlock) { - auto status = _secondaryIndexesBlock->dumpInsertsFromBulk(_opCtx.get(), *coll); + auto status = _secondaryIndexesBlock->dumpInsertsFromBulk(_opCtx.get(), + _collection->getCollection()); if (!status.isOK()) { return status; } // This should always return Status::OK() as the foreground index build doesn't install // an interceptor. 
- invariant(_secondaryIndexesBlock->checkConstraints(_opCtx.get(), *coll)); + invariant(_secondaryIndexesBlock->checkConstraints(_opCtx.get(), + _collection->getCollection())); status = writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [&] { + _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this] { WriteUnitOfWork wunit(_opCtx.get()); - auto status = - _secondaryIndexesBlock->commit(_opCtx.get(), - coll.getWritableCollection(_opCtx.get()), - MultiIndexBlock::kNoopOnCreateEachFn, - MultiIndexBlock::kNoopOnCommitFn); + auto status = _secondaryIndexesBlock->commit( + _opCtx.get(), + _collection->getWritableCollection(_opCtx.get()), + MultiIndexBlock::kNoopOnCreateEachFn, + MultiIndexBlock::kNoopOnCommitFn); if (!status.isOK()) { return status; } @@ -268,10 +271,10 @@ Status CollectionBulkLoaderImpl::commit() { if (_idIndexBlock) { // Do not do inside a WriteUnitOfWork (required by dumpInsertsFromBulk). - auto status = - _idIndexBlock->dumpInsertsFromBulk(_opCtx.get(), *coll, [&](const RecordId& rid) { + auto status = _idIndexBlock->dumpInsertsFromBulk( + _opCtx.get(), _collection->getCollection(), [&](const RecordId& rid) { return writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [&] { + _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this, &rid] { WriteUnitOfWork wunit(_opCtx.get()); // If we were to delete the document after committing the index build, // it's possible that the storage engine unindexes a different record @@ -279,12 +282,13 @@ Status CollectionBulkLoaderImpl::commit() { // before committing the index build, the index removal code uses // 'dupsAllowed', which forces the storage engine to only unindex // records that match the same key and RecordId. 
- (*coll)->deleteDocument(_opCtx.get(), - kUninitializedStmtId, - rid, - nullptr /** OpDebug **/, - false /* fromMigrate */, - true /* noWarn */); + (*_collection) + ->deleteDocument(_opCtx.get(), + kUninitializedStmtId, + rid, + nullptr /** OpDebug **/, + false /* fromMigrate */, + true /* noWarn */); wunit.commit(); return Status::OK(); }); @@ -296,12 +300,13 @@ Status CollectionBulkLoaderImpl::commit() { // Commit the _id index, there won't be any documents with duplicate _ids as they were // deleted prior to this. status = writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [&] { + _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this] { WriteUnitOfWork wunit(_opCtx.get()); - auto status = _idIndexBlock->commit(_opCtx.get(), - coll.getWritableCollection(_opCtx.get()), - MultiIndexBlock::kNoopOnCreateEachFn, - MultiIndexBlock::kNoopOnCommitFn); + auto status = + _idIndexBlock->commit(_opCtx.get(), + _collection->getWritableCollection(_opCtx.get()), + MultiIndexBlock::kNoopOnCreateEachFn, + MultiIndexBlock::kNoopOnCommitFn); if (!status.isOK()) { return status; } @@ -325,31 +330,34 @@ Status CollectionBulkLoaderImpl::commit() { // _releaseResources. 
_idIndexBlock.reset(); _secondaryIndexesBlock.reset(); + _collection.reset(); return Status::OK(); }); } void CollectionBulkLoaderImpl::_releaseResources() { invariant(&cc() == _opCtx->getClient()); - AutoGetCollection coll(_opCtx.get(), _nss, MODE_X); if (_secondaryIndexesBlock) { - CollectionWriter collWriter(_opCtx.get(), coll); + CollectionWriter collWriter(_opCtx.get(), *_collection); _secondaryIndexesBlock->abortIndexBuild( _opCtx.get(), collWriter, MultiIndexBlock::kNoopOnCleanUpFn); _secondaryIndexesBlock.reset(); } if (_idIndexBlock) { - CollectionWriter collWriter(_opCtx.get(), coll); + CollectionWriter collWriter(_opCtx.get(), *_collection); _idIndexBlock->abortIndexBuild(_opCtx.get(), collWriter, MultiIndexBlock::kNoopOnCleanUpFn); _idIndexBlock.reset(); } + + // release locks. + _collection.reset(); } template Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task) noexcept { AlternativeClientRegion acr(_client); - ScopeGuard guard([&] { _releaseResources(); }); + ScopeGuard guard([this] { _releaseResources(); }); try { const auto status = task(); if (status.isOK()) { @@ -361,13 +369,12 @@ Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task } } -Status CollectionBulkLoaderImpl::_addDocumentToIndexBlocks(const CollectionPtr& collection, - const BSONObj& doc, +Status CollectionBulkLoaderImpl::_addDocumentToIndexBlocks(const BSONObj& doc, const RecordId& loc) { if (_idIndexBlock) { auto status = _idIndexBlock->insertSingleDocumentForInitialSyncOrRecovery( _opCtx.get(), - collection, + _collection->getCollection(), doc, loc, // This caller / code path does not have cursors to save/restore. 
@@ -381,7 +388,7 @@ Status CollectionBulkLoaderImpl::_addDocumentToIndexBlocks(const CollectionPtr& if (_secondaryIndexesBlock) { auto status = _secondaryIndexesBlock->insertSingleDocumentForInitialSyncOrRecovery( _opCtx.get(), - collection, + _collection->getCollection(), doc, loc, // This caller / code path does not have cursors to save/restore. diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.h b/src/mongo/db/repl/collection_bulk_loader_impl.h index 61c2d21d6a8..d7077376478 100644 --- a/src/mongo/db/repl/collection_bulk_loader_impl.h +++ b/src/mongo/db/repl/collection_bulk_loader_impl.h @@ -62,7 +62,7 @@ public: CollectionBulkLoaderImpl(ServiceContext::UniqueClient&& client, ServiceContext::UniqueOperationContext&& opCtx, - const NamespaceString& nss, + std::unique_ptr&& autoColl, const BSONObj& idIndexSpec); virtual ~CollectionBulkLoaderImpl(); @@ -86,8 +86,7 @@ private: /** * For capped collections, each document will be inserted in its own WriteUnitOfWork. */ - Status _insertDocumentsForCappedCollection(const CollectionPtr& coll, - std::vector::const_iterator begin, + Status _insertDocumentsForCappedCollection(std::vector::const_iterator begin, std::vector::const_iterator end); /** @@ -95,19 +94,17 @@ private: * collectionBulkLoaderBatchSizeInBytes or up to one document size greater. All insertions in a * given batch will be inserted in one WriteUnitOfWork. */ - Status _insertDocumentsForUncappedCollection(const CollectionPtr& coll, - std::vector::const_iterator begin, + Status _insertDocumentsForUncappedCollection(std::vector::const_iterator begin, std::vector::const_iterator end); /** * Adds document and associated RecordId to index blocks after inserting into RecordStore. 
*/ - Status _addDocumentToIndexBlocks(const CollectionPtr& coll, - const BSONObj& doc, - const RecordId& loc); + Status _addDocumentToIndexBlocks(const BSONObj& doc, const RecordId& loc); ServiceContext::UniqueClient _client; ServiceContext::UniqueOperationContext _opCtx; + std::unique_ptr _collection; NamespaceString _nss; std::unique_ptr _idIndexBlock; std::unique_ptr _secondaryIndexesBlock; diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp index cd562d19af5..361e6aabe1f 100644 --- a/src/mongo/db/repl/initial_syncer.cpp +++ b/src/mongo/db/repl/initial_syncer.cpp @@ -63,7 +63,6 @@ #include "mongo/db/repl/sync_source_selector.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/transaction_oplog_application.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/session/session_txn_record_gen.h" #include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" @@ -578,7 +577,6 @@ void InitialSyncer::_tearDown_inlock(OperationContext* opCtx, _storage->oplogDiskLocRegister(opCtx, initialDataTimestamp, orderedCommit); tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); - ServerlessOperationLockRegistry::recoverLocks(opCtx); reconstructPreparedTransactions(opCtx, repl::OplogApplication::Mode::kInitialSync); _replicationProcess->getConsistencyMarkers()->setInitialSyncIdIfNotSet(opCtx); diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp index 8fde4801165..e98b65b9d76 100644 --- a/src/mongo/db/repl/initial_syncer_test.cpp +++ b/src/mongo/db/repl/initial_syncer_test.cpp @@ -2049,7 +2049,6 @@ TEST_F( "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); // Start the real work. 
ASSERT_OK(initialSyncer->startup(opCtx.get(), initialSyncMaxAttempts)); @@ -2092,8 +2091,6 @@ TEST_F(InitialSyncerTest, "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); - { executor::NetworkInterfaceMock::InNetworkGuard guard(net); @@ -2202,7 +2199,6 @@ TEST_F(InitialSyncerTest, "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); auto initialSyncer = &getInitialSyncer(); auto opCtx = makeOpCtx(); @@ -2274,7 +2270,6 @@ TEST_F( "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); auto initialSyncer = &getInitialSyncer(); auto opCtx = makeOpCtx(); @@ -2586,7 +2581,6 @@ TEST_F(InitialSyncerTest, InitialSyncerRetriesLastOplogEntryFetcherNetworkError) "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); auto initialSyncer = &getInitialSyncer(); auto opCtx = makeOpCtx(); @@ -3231,8 +3225,6 @@ TEST_F(InitialSyncerTest, InitialSyncerHandlesNetworkErrorsFromRollbackCheckerAf "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); - auto initialSyncer = &getInitialSyncer(); auto opCtx = makeOpCtx(); @@ -3547,7 +3539,6 @@ TEST_F(InitialSyncerTest, 
LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); auto initialSyncer = &getInitialSyncer(); auto opCtx = makeOpCtx(); @@ -4213,7 +4204,6 @@ TEST_F(InitialSyncerTest, "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); doSuccessfulInitialSyncWithOneBatch(); } @@ -4229,7 +4219,6 @@ TEST_F(InitialSyncerTest, "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); auto initialSyncer = &getInitialSyncer(); auto opCtx = makeOpCtx(); @@ -4563,7 +4552,6 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) { "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); // Skip clearing initial sync progress so that we can check initialSyncStatus fields after // initial sync is complete. @@ -4933,7 +4921,6 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgressForNetwork "skipRecoverTenantMigrationAccessBlockers"); FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); // Skip clearing initial sync progress so that we can check initialSyncStatus fields after // initial sync is complete. 
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 016e9be97c1..83866de6c9d 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -1863,21 +1863,19 @@ Status applyOperation_inlock(OperationContext* opCtx, case OpTypeEnum::kInsertGlobalIndexKey: { invariant(op.getUuid()); - global_index::insertKey( - opCtx, - *op.getUuid(), - op.getObject().getObjectField(global_index::kOplogEntryIndexKeyFieldName), - op.getObject().getObjectField(global_index::kOplogEntryDocKeyFieldName)); + global_index::insertKey(opCtx, + *op.getUuid(), + op.getObject().getObjectField("ik"), + op.getObject().getObjectField("dk")); break; } case OpTypeEnum::kDeleteGlobalIndexKey: { invariant(op.getUuid()); - global_index::deleteKey( - opCtx, - *op.getUuid(), - op.getObject().getObjectField(global_index::kOplogEntryIndexKeyFieldName), - op.getObject().getObjectField(global_index::kOplogEntryDocKeyFieldName)); + global_index::deleteKey(opCtx, + *op.getUuid(), + op.getObject().getObjectField("ik"), + op.getObject().getObjectField("dk")); break; } default: { diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp index 82fc3d8f4f8..8c658574ef0 100644 --- a/src/mongo/db/repl/oplog_applier_impl_test.cpp +++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp @@ -723,7 +723,7 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentInclud _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nss, true); - ASSERT_TRUE(docExists(_opCtx.get(), nss, doc)); + // TODO SERVER-67423: use docExists to check that the doc actually got inserted } TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentIncorrectTenantId) { @@ -744,8 +744,8 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentIncorr _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nssTenant2, false), ExceptionFor); - ASSERT_FALSE(docExists(_opCtx.get(), 
nssTenant1, doc)); - ASSERT_FALSE(docExists(_opCtx.get(), nssTenant2, doc)); + // TODO SERVER-67423: use docExists to check that the doc still exists on nssTenant1, and does + // not exist on nssTenant2 } TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentIncludesTenantId) { @@ -769,11 +769,11 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentInclud _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nss, true); - // Check that the doc actually got deleted. - ASSERT_FALSE(docExists(_opCtx.get(), nss, doc)); + // TODO SERVER-67423: use docExists to check that the doc actually got deleted } -TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentIncorrectTenantId) { +TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, // see TODO SERVER-67423 below + applyOplogEntryOrGroupedInsertsDeleteDocumentIncorrectTenantId) { RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); const auto commonNss("test.t"_sd); @@ -788,10 +788,16 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentIncorr auto op = makeOplogEntry(OpTypeEnum::kDelete, nssTenant2, boost::none); - _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nssTenant2, false); + ASSERT_THROWS( + _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nssTenant2, false), + ExceptionFor); - ASSERT_TRUE(docExists(_opCtx.get(), nssTenant1, doc)); - ASSERT_FALSE(docExists(_opCtx.get(), nssTenant2, doc)); + // TODO SERVER-67423: use docExists to check that the doc still exists on nssTenant1, and does + // not exist on nssTenant2. 
Also, we are using OplogApplierImplTestEnableSteadyStateConstraints + // because according to OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon not enabling + // steady state constraints allows the delete to fail silently. While updating SERVER-67423, we + // can instead use docExists to check the results of the deletion rather than rely on the + // exception } // Steady state constraints are required for secondaries in order to avoid turning an insert into an @@ -842,9 +848,7 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsUpdateDocumentInclud _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nss, true); - // Check that the doc exists in its new updated form. - BSONObj updatedDoc = BSON("_id" << 0 << "a" << 1); - ASSERT_TRUE(docExists(_opCtx.get(), nss, updatedDoc)); + // TODO SERVER-67423: use docExists to check that the doc exists in its new updated form } TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsUpdateDocumentIncorrectTenantId) { @@ -871,8 +875,8 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsUpdateDocumentIncorr _testApplyOplogEntryOrGroupedInsertsCrudOperation(ErrorCodes::OK, op, nssTenant2, true), ExceptionFor); - ASSERT_TRUE(docExists(_opCtx.get(), nssTenant1, doc)); - ASSERT_FALSE(docExists(_opCtx.get(), nssTenant2, doc)); + // TODO SERVER-67423: use docExists to check that the original doc still exists on nssTenant1, + // and no doc exists on nssTenant2 } class MultiOplogEntryOplogApplierImplTest : public OplogApplierImplTest { diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp index 8c71135dd72..219dcb5be1c 100644 --- a/src/mongo/db/repl/oplog_entry.cpp +++ b/src/mongo/db/repl/oplog_entry.cpp @@ -32,7 +32,6 @@ #include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/global_index.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" #include "mongo/db/server_feature_flags_gen.h" @@ -136,8 +135,7 @@ ReplOperation 
makeGlobalIndexCrudOperation(const NamespaceString& indexNss, // required oplog entry field. op.setNss(indexNss.getCommandNS()); op.setUuid(indexUuid); - op.setObject(BSON(global_index::kOplogEntryIndexKeyFieldName - << key << global_index::kOplogEntryDocKeyFieldName << docKey)); + op.setObject(BSON("ik" << key << "dk" << docKey)); return op; } } // namespace diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp index da06257acdf..c15f88c3df0 100644 --- a/src/mongo/db/repl/primary_only_service.cpp +++ b/src/mongo/db/repl/primary_only_service.cpp @@ -777,16 +777,6 @@ std::shared_ptr PrimaryOnlyService::_insertNewInst return instance->run(std::move(scopedExecutor), std::move(token)); }) - // TODO SERVER-61717 remove this error handler once instance are automatically released - // at the end of run() - .onError([this, instanceID](Status status) { - LOGV2(6531507, - "Removing instance due to ConflictingServerlessOperation error", - "instanceID"_attr = instanceID); - releaseInstance(instanceID, Status::OK()); - - return status; - }) .semi(); auto [it, inserted] = _activeInstances.try_emplace( diff --git a/src/mongo/db/repl/primary_only_service_op_observer.h b/src/mongo/db/repl/primary_only_service_op_observer.h index 391f612daa9..025be4d0617 100644 --- a/src/mongo/db/repl/primary_only_service_op_observer.h +++ b/src/mongo/db/repl/primary_only_service_op_observer.h @@ -58,8 +58,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -219,6 +218,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git 
a/src/mongo/db/repl/repl_client_info.cpp b/src/mongo/db/repl/repl_client_info.cpp index 402da18360a..5db2cc7a7c8 100644 --- a/src/mongo/db/repl/repl_client_info.cpp +++ b/src/mongo/db/repl/repl_client_info.cpp @@ -151,14 +151,15 @@ void ReplClientInfo::setLastOpToSystemLastOpTime(OperationContext* opCtx) { } } -void ReplClientInfo::setLastOpToSystemLastOpTimeIgnoringCtxInterrupted(OperationContext* opCtx) { +void ReplClientInfo::setLastOpToSystemLastOpTimeIgnoringShutdownCtxCancelled( + OperationContext* opCtx) { try { repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); } catch (DBException& e) { - if (opCtx && !opCtx->checkForInterruptNoAssert().isOK()) { - // In most cases, it is safe to ignore all errors when the OperationContext is - // interrupted because we cannot use that OperationContext to wait for writeConcern - // anyways. + if (e.isA() || + (opCtx && opCtx->getKillStatus() != ErrorCodes::OK)) { + // In most cases, it is safe to ignore shutdown and context cancellation errors because + // we cannot use the same OperationContext to wait for writeConcern anyways. LOGV2_DEBUG(21281, 2, "Ignoring set last op error: {error}", @@ -166,7 +167,7 @@ void ReplClientInfo::setLastOpToSystemLastOpTimeIgnoringCtxInterrupted(Operation "error"_attr = e.toStatus()); return; } - // Context was not interrupted, throw error up to caller. + // Context was not cancelled and is not of type ShutdownError, throw error up to caller. throw; } } diff --git a/src/mongo/db/repl/repl_client_info.h b/src/mongo/db/repl/repl_client_info.h index 875f8c3a86e..ee4115879af 100644 --- a/src/mongo/db/repl/repl_client_info.h +++ b/src/mongo/db/repl/repl_client_info.h @@ -108,10 +108,9 @@ public: void setLastOpToSystemLastOpTime(OperationContext* opCtx); /** - * Same as setLastOpToSystemLastOpTime but ignores errors if the OperationContext is - * interrupted. + * Same as setLastOpToSystemLastOpTime but ignores ShutdownError and context cancelled. 
*/ - void setLastOpToSystemLastOpTimeIgnoringCtxInterrupted(OperationContext* opCtx); + void setLastOpToSystemLastOpTimeIgnoringShutdownCtxCancelled(OperationContext* opCtx); private: static const long long kUninitializedTerm = -1; diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp index 2feb1ed6b7b..a943637c2e5 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl.cpp @@ -88,7 +88,6 @@ #include "mongo/db/repl/update_position_args.h" #include "mongo/db/repl/vote_requester.h" #include "mongo/db/server_options.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/session/kill_sessions_local.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/shutdown_in_progress_quiesce_info.h" @@ -535,7 +534,6 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig( } tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); - ServerlessOperationLockRegistry::recoverLocks(opCtx); LOGV2(4280506, "Reconstructing prepared transactions"); reconstructPreparedTransactions(opCtx, OplogApplication::Mode::kRecovering); diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp index 9f7f2e5863d..101ddcf0bb3 100644 --- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp +++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp @@ -192,8 +192,6 @@ void ReplCoordTest::start() { // Skip recovering user writes critical sections for the same reason as the above. FailPointEnableBlock skipRecoverUserWriteCriticalSections( "skipRecoverUserWriteCriticalSections"); - // Skip recovering of serverless mutual exclusion locks for the same reason as the above. 
- FailPointEnableBlock skipRecoverServerlessOperationLock("skipRecoverServerlessOperationLock"); invariant(!_callShutdown); // if we haven't initialized yet, do that first. if (!_repl) { diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp index a1cec2f9309..fa3d51489a8 100644 --- a/src/mongo/db/repl/rollback_impl.cpp +++ b/src/mongo/db/repl/rollback_impl.cpp @@ -44,7 +44,6 @@ #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/global_index.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/logical_time_validator.h" #include "mongo/db/operation_context.h" @@ -59,7 +58,6 @@ #include "mongo/db/repl/transaction_oplog_application.h" #include "mongo/db/s/type_shard_identity.h" #include "mongo/db/server_recovery.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/session/kill_sessions_local.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" @@ -92,8 +90,6 @@ RollbackImpl::Listener kNoopListener; constexpr auto kInsertCmdName = "insert"_sd; constexpr auto kUpdateCmdName = "update"_sd; constexpr auto kDeleteCmdName = "delete"_sd; -constexpr auto kInsertGlobalIndexKeyCmdName = "insertGlobalIndexKey"_sd; -constexpr auto kDeleteGlobalIndexKeyCmdName = "deleteGlobalIndexKey"_sd; constexpr auto kNumRecordsFieldName = "numRecords"_sd; constexpr auto kToFieldName = "to"_sd; constexpr auto kDropTargetFieldName = "dropTarget"_sd; @@ -604,13 +600,13 @@ void RollbackImpl::_runPhaseFromAbortToReconstructPreparedTxns( auto it = m.find(key); return (it == m.end()) ? 
0 : it->second; }; - LOGV2(6984700, - "Operations reverted by rollback", + LOGV2(21599, + "Rollback reverted {insert} insert operations, {update} update operations and {delete} " + "delete operations.", + "Rollback reverted command counts", "insert"_attr = getCommandCount(kInsertCmdName), "update"_attr = getCommandCount(kUpdateCmdName), - "delete"_attr = getCommandCount(kDeleteCmdName), - "insertGlobalIndexKey"_attr = getCommandCount(kInsertGlobalIndexKeyCmdName), - "deleteGlobalIndexKey"_attr = getCommandCount(kDeleteGlobalIndexKeyCmdName)); + "delete"_attr = getCommandCount(kDeleteCmdName)); // Retryable writes create derived updates to the transactions table which can be coalesced into // one operation, so certain session operations history may be lost after restoring to the @@ -656,7 +652,6 @@ void RollbackImpl::_runPhaseFromAbortToReconstructPreparedTxns( _correctRecordStoreCounts(opCtx); tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); - ServerlessOperationLockRegistry::recoverLocks(opCtx); // Reconstruct prepared transactions after counts have been adjusted. Since prepared // transactions were aborted (i.e. the in-memory counts were rolled-back) before computing @@ -922,21 +917,12 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr // Keep track of the _ids of inserted and updated documents, as we may need to write them out to // a rollback file. 
- if (opType == OpTypeEnum::kInsert || opType == OpTypeEnum::kUpdate || - opType == OpTypeEnum::kInsertGlobalIndexKey) { + if (opType == OpTypeEnum::kInsert || opType == OpTypeEnum::kUpdate) { const auto uuid = oplogEntry.getUuid(); invariant(uuid, str::stream() << "Oplog entry to roll back is unexpectedly missing a UUID: " << redact(oplogEntry.toBSONForLogging())); - const auto idElem = [&]() { - if (opType == OpTypeEnum::kInsertGlobalIndexKey) { - // As global indexes currently lack support for multi-key, a key can be uniquely - // identified by its document key, which maps the _id field in the global index - // container (collection). - return oplogEntry.getObject()[global_index::kOplogEntryDocKeyFieldName]; - } - return oplogEntry.getIdElement(); - }(); + const auto idElem = oplogEntry.getIdElement(); if (!idElem.eoo()) { // We call BSONElement::wrap() on each _id element to create a new BSONObj with an owned // buffer, as the underlying storage may be gone when we access this map to write @@ -947,7 +933,7 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr } } - if (opType == OpTypeEnum::kInsert || opType == OpTypeEnum::kInsertGlobalIndexKey) { + if (opType == OpTypeEnum::kInsert) { auto idVal = oplogEntry.getObject().getStringField("_id"); if (serverGlobalParams.clusterRole == ClusterRole::ShardServer && opNss == NamespaceString::kServerConfigurationNamespace && @@ -971,12 +957,11 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr // Rolling back an insert must decrement the count by 1. _countDiffs[oplogEntry.getUuid().value()] -= 1; - } else if (opType == OpTypeEnum::kDelete || opType == OpTypeEnum::kDeleteGlobalIndexKey) { + } else if (opType == OpTypeEnum::kDelete) { // Rolling back a delete must increment the count by 1. 
_countDiffs[oplogEntry.getUuid().value()] += 1; } else if (opType == OpTypeEnum::kCommand) { - if (oplogEntry.getCommandType() == OplogEntry::CommandType::kCreate || - oplogEntry.getCommandType() == OplogEntry::CommandType::kCreateGlobalIndex) { + if (oplogEntry.getCommandType() == OplogEntry::CommandType::kCreate) { // If we roll back a create, then we do not need to change the size of that uuid. _countDiffs.erase(oplogEntry.getUuid().value()); _pendingDrops.erase(oplogEntry.getUuid().value()); @@ -997,15 +982,13 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr _pendingDrops.erase(importTargetUUID); _newCounts.erase(importTargetUUID); } - } else if (oplogEntry.getCommandType() == OplogEntry::CommandType::kDrop || - oplogEntry.getCommandType() == OplogEntry::CommandType::kDropGlobalIndex) { - // The collection count at collection drop time is op-logged in the 'o2' field. - // In the common case where the drop-pending collection is managed by the storage - // engine, the collection metadata - including the number of records at drop time - - // is not accessible through the catalog. - // Keep track of the record count stored in the 'o2' field via the _newCounts variable. - // This allows for cheaply restoring the collection count post rollback without an - // expensive collection scan. + } else if (oplogEntry.getCommandType() == OplogEntry::CommandType::kDrop) { + // If we roll back a collection drop, parse the o2 field for the collection count for + // use later by _findRecordStoreCounts(). + // This will be used to reconcile collection counts in the case where the drop-pending + // collection is managed by the storage engine and is not accessible through the UUID + // catalog. + // Adding a _newCounts entry ensures that the count will be set after the rollback. 
const auto uuid = oplogEntry.getUuid().value(); invariant(_countDiffs.find(uuid) == _countDiffs.end(), str::stream() << "Unexpected existing count diff for " << uuid.toString() @@ -1075,12 +1058,6 @@ Status RollbackImpl::_processRollbackOp(OperationContext* opCtx, const OplogEntr if (opType == OpTypeEnum::kDelete) { ++_observerInfo.rollbackCommandCounts[kDeleteCmdName]; } - if (opType == OpTypeEnum::kInsertGlobalIndexKey) { - ++_observerInfo.rollbackCommandCounts[kInsertGlobalIndexKeyCmdName]; - } - if (opType == OpTypeEnum::kDeleteGlobalIndexKey) { - ++_observerInfo.rollbackCommandCounts[kDeleteGlobalIndexKeyCmdName]; - } return Status::OK(); } diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index c357a1b411e..dad1cbc45df 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -232,11 +232,13 @@ StorageInterfaceImpl::createCollectionForBulkLoading( .setFlags(DocumentValidationSettings::kDisableSchemaValidation | DocumentValidationSettings::kDisableInternalValidation); + std::unique_ptr autoColl; // Retry if WCE. Status status = writeConflictRetry(opCtx.get(), "beginCollectionClone", nss.ns(), [&] { UnreplicatedWritesBlock uwb(opCtx.get()); // Get locks and create the collection. + AutoGetDb autoDb(opCtx.get(), nss.dbName(), MODE_IX); AutoGetCollection coll(opCtx.get(), nss, fixLockModeForSystemDotViewsChanges(nss, MODE_X)); if (coll) { return Status(ErrorCodes::NamespaceExists, @@ -245,29 +247,35 @@ StorageInterfaceImpl::createCollectionForBulkLoading( { // Create the collection. WriteUnitOfWork wunit(opCtx.get()); - auto db = coll.ensureDbExists(opCtx.get()); + auto db = autoDb.ensureDbExists(opCtx.get()); fassert(40332, db->createCollection(opCtx.get(), nss, options, false)); wunit.commit(); } + autoColl = std::make_unique( + opCtx.get(), nss, fixLockModeForSystemDotViewsChanges(nss, MODE_IX)); + // Build empty capped indexes. 
Capped indexes cannot be built by the MultiIndexBlock // because the cap might delete documents off the back while we are inserting them into // the front. if (options.capped) { WriteUnitOfWork wunit(opCtx.get()); - // `getWritableCollection` will return the newly created collection even if it didn't - // exist when the AutoGet was created. - auto writableCollection = coll.getWritableCollection(opCtx.get()); if (!idIndexSpec.isEmpty()) { - auto status = writableCollection->getIndexCatalog()->createIndexOnEmptyCollection( - opCtx.get(), writableCollection, idIndexSpec); + auto status = + autoColl->getWritableCollection(opCtx.get()) + ->getIndexCatalog() + ->createIndexOnEmptyCollection( + opCtx.get(), autoColl->getWritableCollection(opCtx.get()), idIndexSpec); if (!status.getStatus().isOK()) { return status.getStatus(); } } for (auto&& spec : secondaryIndexSpecs) { - auto status = writableCollection->getIndexCatalog()->createIndexOnEmptyCollection( - opCtx.get(), writableCollection, spec); + auto status = + autoColl->getWritableCollection(opCtx.get()) + ->getIndexCatalog() + ->createIndexOnEmptyCollection( + opCtx.get(), autoColl->getWritableCollection(opCtx.get()), spec); if (!status.getStatus().isOK()) { return status.getStatus(); } @@ -283,8 +291,11 @@ StorageInterfaceImpl::createCollectionForBulkLoading( } // Move locks into loader, so it now controls their lifetime. - auto loader = std::make_unique( - Client::releaseCurrent(), std::move(opCtx), nss, options.capped ? BSONObj() : idIndexSpec); + auto loader = + std::make_unique(Client::releaseCurrent(), + std::move(opCtx), + std::move(autoColl), + options.capped ? BSONObj() : idIndexSpec); status = loader->init(options.capped ? 
std::vector() : secondaryIndexSpecs); if (!status.isOK()) { diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp index 360c0599db6..85e3f012bf0 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp +++ b/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp @@ -88,7 +88,7 @@ void TenantMigrationAccessBlockerRegistry::add(StringData tenantId, if (it != _tenantMigrationAccessBlockers.end()) { auto existingMtab = it->second.getAccessBlocker(mtabType); if (existingMtab) { - uasserted(ErrorCodes::ConflictingServerlessOperation, + tasserted(ErrorCodes::ConflictingOperationInProgress, str::stream() << "This node is already a " << (mtabType == MtabType::kDonor ? "donor" : "recipient") << " for tenantId \"" << tenantId << "\" with migrationId \"" @@ -121,7 +121,7 @@ void TenantMigrationAccessBlockerRegistry::add(std::shared_ptrgetServiceContext()) - .acquireLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, - donorStateDoc.getId()); - auto mtab = std::make_shared(opCtx->getServiceContext(), donorStateDoc.getId()); if (donorStateDoc.getProtocol().value_or(MigrationProtocolEnum::kMultitenantMigrations) == @@ -74,9 +69,6 @@ void onTransitionToAbortingIndexBuilds(OperationContext* opCtx, TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .remove(donorStateDoc.getTenantId(), TenantMigrationAccessBlocker::BlockerType::kDonor); - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, - donorStateDoc.getId()); }); } } else { @@ -93,9 +85,6 @@ void onTransitionToAbortingIndexBuilds(OperationContext* opCtx, TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAccessBlockersForMigration( donorStateDoc.getId(), TenantMigrationAccessBlocker::BlockerType::kDonor); - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - 
.releaseLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, - donorStateDoc.getId()); }); } } @@ -166,10 +155,6 @@ public: void commit(OperationContext* opCtx, boost::optional) override { if (_donorStateDoc.getExpireAt()) { - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, - _donorStateDoc.getId()); - auto mtab = tenant_migration_access_blocker::getTenantMigrationDonorAccessBlocker( opCtx->getServiceContext(), _donorStateDoc.getTenantId()); @@ -353,9 +338,6 @@ repl::OpTime TenantMigrationDonorOpObserver::onDropCollection(OperationContext* opCtx->recoveryUnit()->onCommit([opCtx](boost::optional) { TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAll(TenantMigrationAccessBlocker::BlockerType::kDonor); - - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .onDropStateCollection(ServerlessOperationLockRegistry::LockType::kTenantDonor); }); } return {}; diff --git a/src/mongo/db/repl/tenant_migration_donor_op_observer.h b/src/mongo/db/repl/tenant_migration_donor_op_observer.h index 842a43926ab..2c969e5ac27 100644 --- a/src/mongo/db/repl/tenant_migration_donor_op_observer.h +++ b/src/mongo/db/repl/tenant_migration_donor_op_observer.h @@ -56,8 +56,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -216,6 +215,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp index 0da1de32c4e..1d3f4cdf1e2 100644 --- 
a/src/mongo/db/repl/tenant_migration_donor_service.cpp +++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp @@ -123,6 +123,7 @@ void checkForTokenInterrupt(const CancellationToken& token) { uassert(ErrorCodes::CallbackCanceled, "Donor service interrupted", !token.isCanceled()); } + template void setPromiseFromStatusIfNotReady(WithLock lk, Promise& promise, Status status) { if (promise.getFuture().isReady()) { @@ -154,17 +155,6 @@ void setPromiseOkIfNotReady(WithLock lk, Promise& promise) { promise.emplaceValue(); } -bool isNotDurableAndServerlessConflict(WithLock lk, SharedPromise& promise) { - auto future = promise.getFuture(); - - if (!future.isReady() || - future.getNoThrow().code() != ErrorCodes::ConflictingServerlessOperation) { - return false; - } - - return true; -} - } // namespace void TenantMigrationDonorService::checkIfConflictsWithOtherInstances( @@ -525,16 +515,7 @@ ExecutorFuture TenantMigrationDonorService::Instance::_insertState return repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); }) - .until([&](StatusWith swOpTime) { - if (swOpTime.getStatus().code() == ErrorCodes::ConflictingServerlessOperation) { - LOGV2(6531508, - "Tenant migration completed due to serverless lock error", - "id"_attr = _migrationUuid, - "status"_attr = swOpTime.getStatus()); - uassertStatusOK(swOpTime); - } - return swOpTime.getStatus().isOK(); - }) + .until([](StatusWith swOpTime) { return swOpTime.getStatus().isOK(); }) .withBackoffBetweenIterations(kExponentialBackoff) .on(**executor, token); } @@ -969,8 +950,6 @@ SemiFuture TenantMigrationDonorService::Instance::run( ->incTotalMigrationDonationsCommitted(); } } - - return Status::OK(); }) .then([this, self = shared_from_this(), executor, token, recipientTargeterRS] { return _waitForForgetMigrationThenMarkMigrationGarbageCollectable( @@ -998,13 +977,6 @@ SemiFuture TenantMigrationDonorService::Instance::run( "tenantId"_attr = _tenantId, "status"_attr = status, "abortReason"_attr = _abortReason); - - 
// If a ConflictingServerlessOperation was thrown during the initial insertion we do not - // have a state document. In that case return the error to PrimaryOnlyService so it - // frees the instance from its map. - if (isNotDurableAndServerlessConflict(lg, _initialDonorStateDurablePromise)) { - uassertStatusOK(_initialDonorStateDurablePromise.getFuture().getNoThrow()); - } }) .semi(); } @@ -1391,6 +1363,7 @@ ExecutorFuture TenantMigrationDonorService::Instance::_handleErrorOrEnterA checkForTokenInterrupt(token); { + stdx::lock_guard lg(_mutex); if (_stateDoc.getState() == TenantMigrationDonorStateEnum::kAborted) { // The migration was resumed on stepup and it was already aborted. return ExecutorFuture(**executor); @@ -1447,21 +1420,6 @@ TenantMigrationDonorService::Instance::_waitForForgetMigrationThenMarkMigrationG const std::shared_ptr& executor, std::shared_ptr recipientTargeterRS, const CancellationToken& token) { - const bool skipWaitingForForget = [&]() { - stdx::lock_guard lg(_mutex); - if (!isNotDurableAndServerlessConflict(lg, _initialDonorStateDurablePromise)) { - return false; - } - setPromiseErrorIfNotReady(lg, - _receiveDonorForgetMigrationPromise, - _initialDonorStateDurablePromise.getFuture().getNoThrow()); - return true; - }(); - - if (skipWaitingForForget) { - return ExecutorFuture(**executor); - } - LOGV2(6104909, "Waiting to receive 'donorForgetMigration' command.", "migrationId"_attr = _migrationUuid, @@ -1487,16 +1445,6 @@ TenantMigrationDonorService::Instance::_waitForForgetMigrationThenMarkMigrationG return std::move(_receiveDonorForgetMigrationPromise.getFuture()) .thenRunOn(**executor) .then([this, self = shared_from_this(), executor, recipientTargeterRS, token] { - { - // If the abortReason is ConflictingServerlessOperation, it means there are no - // document on the recipient. Do not send the forget command. 
- stdx::lock_guard lg(_mutex); - if (_abortReason && - _abortReason->code() == ErrorCodes::ConflictingServerlessOperation) { - return ExecutorFuture(**executor); - } - } - LOGV2(6104910, "Waiting for recipientForgetMigration response.", "migrationId"_attr = _migrationUuid, @@ -1539,12 +1487,6 @@ TenantMigrationDonorService::Instance::_waitForForgetMigrationThenMarkMigrationG ExecutorFuture TenantMigrationDonorService::Instance::_waitForGarbageCollectionDelayThenDeleteStateDoc( const std::shared_ptr& executor, const CancellationToken& token) { - // If the state document was not inserted due to a conflicting serverless operation, do not - // try to delete it. - stdx::lock_guard lg(_mutex); - if (isNotDurableAndServerlessConflict(lg, _initialDonorStateDurablePromise)) { - return ExecutorFuture(**executor); - } LOGV2(8423362, "Waiting for garbage collection delay before deleting state document", @@ -1552,6 +1494,7 @@ TenantMigrationDonorService::Instance::_waitForGarbageCollectionDelayThenDeleteS "tenantId"_attr = _tenantId, "expireAt"_attr = *_stateDoc.getExpireAt()); + stdx::lock_guard lg(_mutex); return (*executor) ->sleepUntil(*_stateDoc.getExpireAt(), token) .then([this, self = shared_from_this(), executor, token]() { diff --git a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp index 19f5f6ab71c..e2a047876bf 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp @@ -41,7 +41,6 @@ #include "mongo/db/repl/tenant_migration_shard_merge_util.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/tenant_migration_util.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/logv2/log.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -180,18 +179,6 @@ void TenantMigrationRecipientOpObserver::onInserts( 
std::vector::const_iterator first, std::vector::const_iterator last, bool fromMigrate) { - if (coll->ns() == NamespaceString::kTenantMigrationRecipientsNamespace && - !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { - for (auto it = first; it != last; it++) { - auto recipientStateDoc = TenantMigrationRecipientDocument::parse( - IDLParserContext("recipientStateDoc"), it->doc); - if (!recipientStateDoc.getExpireAt()) { - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .acquireLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, - recipientStateDoc.getId()); - } - } - } if (!shard_merge_utils::isDonatedFilesCollection(coll->ns())) { return; @@ -217,10 +204,6 @@ void TenantMigrationRecipientOpObserver::onUpdate(OperationContext* opCtx, repl::TenantFileImporterService::get(opCtx->getServiceContext()) ->interrupt(recipientStateDoc.getId()); - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, - recipientStateDoc.getId()); - std::vector tenantIdsToRemove; auto cleanUpBlockerIfGarbage = [&](std::string tenantId, std::shared_ptr& mtab) { @@ -329,9 +312,6 @@ repl::OpTime TenantMigrationRecipientOpObserver::onDropCollection( repl::TenantFileImporterService::get(opCtx->getServiceContext())->interruptAll(); TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAll(TenantMigrationAccessBlocker::BlockerType::kRecipient); - - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .onDropStateCollection(ServerlessOperationLockRegistry::LockType::kTenantRecipient); }); } return {}; diff --git a/src/mongo/db/repl/tenant_migration_recipient_op_observer.h b/src/mongo/db/repl/tenant_migration_recipient_op_observer.h index b62a276b86d..c333dff1bbe 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_op_observer.h +++ b/src/mongo/db/repl/tenant_migration_recipient_op_observer.h @@ -57,8 +57,7 @@ public: void 
onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -218,6 +217,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp index ab7e7ef12e1..869670c6ca3 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp @@ -2968,8 +2968,7 @@ SemiFuture TenantMigrationRecipientService::Instance::run( // Handle recipientForgetMigration. stdx::lock_guard lk(_mutex); if (_stateDoc.getExpireAt() || - MONGO_unlikely(autoRecipientForgetMigration.shouldFail()) || - status.code() == ErrorCodes::ConflictingServerlessOperation) { + MONGO_unlikely(autoRecipientForgetMigration.shouldFail())) { // Skip waiting for the recipientForgetMigration command. setPromiseOkifNotReady(lk, _receivedRecipientForgetMigrationPromise); } @@ -3019,16 +3018,7 @@ SemiFuture TenantMigrationRecipientService::Instance::run( // is safe even on shutDown/stepDown. stdx::lock_guard lk(_mutex); invariant(_dataSyncCompletionPromise.getFuture().isReady()); - - if (status.code() == ErrorCodes::ConflictingServerlessOperation) { - LOGV2(6531506, - "Migration failed as another serverless operation was in progress", - "migrationId"_attr = getMigrationUUID(), - "tenantId"_attr = getTenantId(), - "status"_attr = status); - setPromiseOkifNotReady(lk, _forgetMigrationDurablePromise); - return status; - } else if (!status.isOK()) { + if (!status.isOK()) { // We should only hit here on a stepDown/shutDown, or a 'conflicting migration' // error. 
LOGV2(4881402, @@ -3039,8 +3029,6 @@ SemiFuture TenantMigrationRecipientService::Instance::run( setPromiseErrorifNotReady(lk, _forgetMigrationDurablePromise, status); } _taskState.setState(TaskState::kDone); - - return Status::OK(); }) .semi(); } diff --git a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp index ea95a8b4b06..62566ec6d55 100644 --- a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp +++ b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp @@ -216,20 +216,12 @@ struct IndexSpec { }; /** - * To be used for finding the index that can be used as a hint for the aggregate command for - * calculating the cardinality and frequency metrics. - * * Returns the IndexSpec for the index that has the given shard key as a prefix, ignoring the index - * type (i.e. hashed or range) since the grouping inside the aggregation works with both the - * original field values and the hashes of the field values. The index must meet the following - * requirements: - * - It must have simple collation since that is the only supported collation for shard key string - * fields comparisons. - * - It must not be sparse since such an index omits documents that have null/missing index - * key fields. - * - It must not be partial since such an index omits documents do not match the specified - * filter. - * - It must not be multi-key since a shard key field cannot be an array. + * type (i.e. hashed or range). To be used for finding the index that can be used as a hint for the + * aggregate command for calculating the cardinality and frequency metrics (the aggregation pipeline + * works with both the original field values or by the hashes of the field values). The index must + * have simple collation since that is the only supported collation for shard key string fields + * comparisons. 
*/ boost::optional findCompatiblePrefixedIndex(OperationContext* opCtx, const CollectionPtr& collection, @@ -250,8 +242,8 @@ boost::optional findCompatiblePrefixedIndex(OperationContext* opCtx, auto indexEntry = indexIterator->next(); auto indexDesc = indexEntry->descriptor(); auto indexKey = indexDesc->keyPattern(); - if (indexDesc->collation().isEmpty() && !indexDesc->isSparse() && !indexDesc->isPartial() && - !indexEntry->isMultikey(opCtx, collection) && shardKey.isFieldNamePrefixOf(indexKey)) { + if (indexDesc->collation().isEmpty() && !indexEntry->isMultikey(opCtx, collection) && + shardKey.isFieldNamePrefixOf(indexKey)) { return IndexSpec{indexKey, indexDesc->unique()}; } } diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp index b0057770471..ec93541b311 100644 --- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp +++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp @@ -128,7 +128,7 @@ public: const NamespaceString nss = ns(); auto migratedChunk = toChunkType(request().getMigratedChunk()); - StatusWith response = + StatusWith chunkVersionResponse = ShardingCatalogManager::get(opCtx)->commitChunkMigration( opCtx, nss, @@ -139,9 +139,9 @@ public: request().getToShard(), request().getValidAfter()); - auto shardAndCollVers = uassertStatusOK(response); + auto chunkVersionObj = uassertStatusOK(chunkVersionResponse); - return Response{shardAndCollVers.shardVersion}; + return Response{ChunkVersion::parse(chunkVersionObj[ChunkVersion::kChunkVersionField])}; } private: diff --git a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp index 15d02fe8a98..430cb5c25b9 100644 --- a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp +++ b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp @@ -87,7 +87,7 @@ public: repl::ReadConcernArgs::get(opCtx) = 
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); - const auto shardAndCollVers = uassertStatusOK( + const BSONObj shardAndCollVers = uassertStatusOK( ShardingCatalogManager::get(opCtx)->commitChunksMerge(opCtx, ns(), request().getEpoch(), @@ -96,7 +96,8 @@ public: request().getChunkRange(), request().getShard(), request().getValidAfter())); - return ConfigSvrMergeResponse{shardAndCollVers.shardVersion}; + return ConfigSvrMergeResponse{ + ChunkVersion::parse(shardAndCollVers[ChunkVersion::kChunkVersionField])}; } private: diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp index 37ac4d023b6..713f7252356 100644 --- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp +++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp @@ -65,9 +65,6 @@ using std::string; * writeConcern: * } */ - -constexpr StringData kCollectionVersionField = "collectionVersion"_sd; - class ConfigSvrSplitChunkCommand : public BasicCommand { public: ConfigSvrSplitChunkCommand() : BasicCommand("_configsvrCommitChunkSplit") {} @@ -132,9 +129,7 @@ public: parsedRequest.getSplitPoints(), parsedRequest.getShardName(), parsedRequest.isFromChunkSplitter())); - - shardAndCollVers.collectionVersion.serialize(kCollectionVersionField, &result); - shardAndCollVers.shardVersion.serialize(ChunkVersion::kChunkVersionField, &result); + result.appendElements(shardAndCollVers); return true; } diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp index 6b02aeb4d53..6628104d73d 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp @@ -462,7 +462,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) { opCtx, NamespaceString::kConfigsvrPlacementHistoryNamespace, BSON(NamespacePlacementType::kNssFieldName - << 1 << NamespacePlacementType::kTimestampFieldName << -1), 
+ << 1 << NamespacePlacementType::kTimestampFieldName << 1), unique); if (!result.isOK()) { return result; diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h index 8cf63f2da69..c1f94dbc46a 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.h +++ b/src/mongo/db/s/config/sharding_catalog_manager.h @@ -91,11 +91,6 @@ public: std::unique_ptr addShardExecutor); ~ShardingCatalogManager(); - struct ShardAndCollectionVersion { - ChunkVersion shardVersion; - ChunkVersion collectionVersion; - }; - /** * Instantiates an instance of the sharding catalog manager and installs it on the specified * service context. This method is not thread-safe and must be called only once when the service @@ -247,20 +242,18 @@ public: * Updates metadata in the config.chunks collection to show the given chunk as split into * smaller chunks at the specified split points. * - * Returns a ShardAndCollectionVersion object with the newly produced chunk versions after the - * migration: + * Returns a BSON object with the newly produced chunk versions after the migration: * - shardVersion - The new shard version of the source shard * - collectionVersion - The new collection version after the commit */ - StatusWith commitChunkSplit( - OperationContext* opCtx, - const NamespaceString& nss, - const OID& requestEpoch, - const boost::optional& requestTimestamp, - const ChunkRange& range, - const std::vector& splitPoints, - const std::string& shardName, - bool fromChunkSplitter); + StatusWith commitChunkSplit(OperationContext* opCtx, + const NamespaceString& nss, + const OID& requestEpoch, + const boost::optional& requestTimestamp, + const ChunkRange& range, + const std::vector& splitPoints, + const std::string& shardName, + bool fromChunkSplitter); /** * Updates metadata in the config.chunks collection so the chunks within the specified key range @@ -268,40 +261,36 @@ public: * If 'validAfter' is not set, this means the commit request 
came from an older server version, * which is not history-aware. * - * Returns a ShardAndCollectionVersion object with the newly produced chunk versions after the - * migration: + * Returns a BSON object with the newly produced chunk versions after the migration: * - shardVersion - The new shard version of the source shard * - collectionVersion - The new collection version after the commit */ - StatusWith commitChunksMerge( - OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& epoch, - const boost::optional& timestamp, - const UUID& requestCollectionUUID, - const ChunkRange& chunkRange, - const ShardId& shardId, - const boost::optional& validAfter); + StatusWith commitChunksMerge(OperationContext* opCtx, + const NamespaceString& nss, + const boost::optional& epoch, + const boost::optional& timestamp, + const UUID& requestCollectionUUID, + const ChunkRange& chunkRange, + const ShardId& shardId, + const boost::optional& validAfter); /** * Updates metadata in config.chunks collection to show the given chunk in its new shard. * If 'validAfter' is not set, this means the commit request came from an older server version, * which is not history-aware. 
* - * Returns a ShardAndCollectionVersion object with the newly produced chunk versions after the - * migration: + * Returns a BSON object with the newly produced chunk versions after the migration: * - shardVersion - The new shard version of the source shard * - collectionVersion - The new collection version after the commit */ - StatusWith commitChunkMigration( - OperationContext* opCtx, - const NamespaceString& nss, - const ChunkType& migratedChunk, - const OID& collectionEpoch, - const Timestamp& collectionTimestamp, - const ShardId& fromShard, - const ShardId& toShard, - const boost::optional& validAfter); + StatusWith commitChunkMigration(OperationContext* opCtx, + const NamespaceString& nss, + const ChunkType& migratedChunk, + const OID& collectionEpoch, + const Timestamp& collectionTimestamp, + const ShardId& fromShard, + const ShardId& toShard, + const boost::optional& validAfter); /** * Removes the jumbo flag from the specified chunk. diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index 55381ae4041..23083a77590 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -73,6 +73,8 @@ MONGO_FAIL_POINT_DEFINE(skipExpiringOldChunkHistory); const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0)); +constexpr StringData kCollectionVersionField = "collectionVersion"_sd; + /** * Append min, max and version information from chunk to the buffer for logChange purposes. 
*/ @@ -607,15 +609,15 @@ ShardingCatalogManager::_splitChunkInTransaction(OperationContext* opCtx, sharedBlock->newChunks}; } -StatusWith -ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx, - const NamespaceString& nss, - const OID& requestEpoch, - const boost::optional& requestTimestamp, - const ChunkRange& range, - const std::vector& splitPoints, - const std::string& shardName, - const bool fromChunkSplitter) { +StatusWith ShardingCatalogManager::commitChunkSplit( + OperationContext* opCtx, + const NamespaceString& nss, + const OID& requestEpoch, + const boost::optional& requestTimestamp, + const ChunkRange& range, + const std::vector& splitPoints, + const std::string& shardName, + const bool fromChunkSplitter) { // Mark opCtx as interruptible to ensure that all reads and writes to the metadata collections // under the exclusive _kChunkOpLock happen on the same term. @@ -716,8 +718,10 @@ ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx, } } - return ShardAndCollectionVersion{splitChunkResult.currentMaxVersion /*shardVersion*/, - splitChunkResult.currentMaxVersion /*collectionVersion*/}; + BSONObjBuilder response; + splitChunkResult.currentMaxVersion.serialize(kCollectionVersionField, &response); + splitChunkResult.currentMaxVersion.serialize(ChunkVersion::kChunkVersionField, &response); + return response.obj(); } void ShardingCatalogManager::_mergeChunksInTransaction( @@ -825,15 +829,15 @@ void ShardingCatalogManager::_mergeChunksInTransaction( txn.run(opCtx, updateChunksFn); } -StatusWith -ShardingCatalogManager::commitChunksMerge(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& epoch, - const boost::optional& timestamp, - const UUID& requestCollectionUUID, - const ChunkRange& chunkRange, - const ShardId& shardId, - const boost::optional& validAfter) { +StatusWith ShardingCatalogManager::commitChunksMerge( + OperationContext* opCtx, + const NamespaceString& nss, + const boost::optional& epoch, + const 
boost::optional& timestamp, + const UUID& requestCollectionUUID, + const ChunkRange& chunkRange, + const ShardId& shardId, + const boost::optional& validAfter) { if (!validAfter) { return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"}; } @@ -903,14 +907,15 @@ ShardingCatalogManager::commitChunksMerge(OperationContext* opCtx, << " does not contain a sequence of chunks that exactly fills the range " << chunkRange.toString(), chunk.getRange() == chunkRange); - + BSONObjBuilder response; + collVersion.serialize(kCollectionVersionField, &response); const auto currentShardVersion = getShardVersion(opCtx, coll, shardId, collVersion); - + currentShardVersion.serialize(ChunkVersion::kChunkVersionField, &response); // Makes sure that the last thing we read in getCollectionVersion and getShardVersion gets // majority written before to return from this command, otherwise next RoutingInfo cache // refresh from the shard may not see those newest information. repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); - return ShardAndCollectionVersion{currentShardVersion, collVersion}; + return response.obj(); } // 3. 
Prepare the data for the merge @@ -967,18 +972,21 @@ ShardingCatalogManager::commitChunksMerge(OperationContext* opCtx, ShardingLogging::get(opCtx)->logChange( opCtx, "merge", nss.ns(), logDetail.obj(), WriteConcernOptions()); - return ShardAndCollectionVersion{mergeVersion /*shardVersion*/, mergeVersion /*collVersion*/}; + BSONObjBuilder response; + mergeVersion.serialize(kCollectionVersionField, &response); + mergeVersion.serialize(ChunkVersion::kChunkVersionField, &response); + return response.obj(); } -StatusWith -ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, - const NamespaceString& nss, - const ChunkType& migratedChunk, - const OID& collectionEpoch, - const Timestamp& collectionTimestamp, - const ShardId& fromShard, - const ShardId& toShard, - const boost::optional& validAfter) { +StatusWith ShardingCatalogManager::commitChunkMigration( + OperationContext* opCtx, + const NamespaceString& nss, + const ChunkType& migratedChunk, + const OID& collectionEpoch, + const Timestamp& collectionTimestamp, + const ShardId& fromShard, + const ShardId& toShard, + const boost::optional& validAfter) { if (!validAfter) { return {ErrorCodes::IllegalOperation, "chunk operation requires validAfter timestamp"}; @@ -1092,13 +1100,16 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, if (currentChunk.getShard() == toShard) { // The commit was already done successfully + BSONObjBuilder response; + currentCollectionVersion.serialize(kCollectionVersionField, &response); const auto currentShardVersion = getShardVersion(opCtx, coll, fromShard, currentCollectionVersion); + currentShardVersion.serialize(ChunkVersion::kChunkVersionField, &response); // Makes sure that the last thing we read in findChunkContainingRange, getShardVersion, and // getCollectionVersion gets majority written before to return from this command, otherwise // next RoutingInfo cache refresh from the shard may not see those newest information. 
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); - return ShardAndCollectionVersion{currentShardVersion, currentCollectionVersion}; + return response.obj(); } uassert(4914702, @@ -1221,17 +1232,18 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, _commitChunkMigrationInTransaction( opCtx, nss, newMigratedChunk, newSplitChunks, newControlChunk); - ShardAndCollectionVersion response; + BSONObjBuilder response; if (!newControlChunk) { // We migrated the last chunk from the donor shard. - response.collectionVersion = newMigratedChunk->getVersion(); - response.shardVersion = ChunkVersion( + newMigratedChunk->getVersion().serialize(kCollectionVersionField, &response); + const ChunkVersion donorShardVersion( {currentCollectionVersion.epoch(), currentCollectionVersion.getTimestamp()}, {0, 0}); + donorShardVersion.serialize(ChunkVersion::kChunkVersionField, &response); } else { - response.collectionVersion = newControlChunk->getVersion(); - response.shardVersion = newControlChunk->getVersion(); + newControlChunk->getVersion().serialize(kCollectionVersionField, &response); + newControlChunk->getVersion().serialize(ChunkVersion::kChunkVersionField, &response); } - return response; + return response.obj(); } StatusWith ShardingCatalogManager::_findChunkOnConfig(OperationContext* opCtx, diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp index 2b627a53e3d..0f99ae11634 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp @@ -126,25 +126,25 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { setupCollection(kNamespace, kKeyPattern, {migratedChunk, controlChunk}); Timestamp validAfter{101, 0}; - auto versions = assertGet(ShardingCatalogManager::get(operationContext()) - 
->commitChunkMigration(operationContext(), - kNamespace, - migratedChunk, - migratedChunk.getVersion().epoch(), - collTimestamp, - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter)); + BSONObj versions = assertGet(ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + migratedChunk, + migratedChunk.getVersion().epoch(), + collTimestamp, + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter)); // Verify the versions returned match expected values. - auto mver = versions.shardVersion; + auto mver = ChunkVersion::parse(versions["shardVersion"]); ASSERT_EQ(ChunkVersion( {migratedChunk.getVersion().epoch(), migratedChunk.getVersion().getTimestamp()}, {migratedChunk.getVersion().majorVersion() + 1, 1}), mver); // Verify that a collection version is returned - auto cver = versions.collectionVersion; + auto cver = ChunkVersion::parse(versions["collectionVersion"]); ASSERT_TRUE(mver.isOlderOrEqualThan(cver)); // Verify the chunks ended up in the right shards. @@ -204,26 +204,25 @@ TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectlyWithoutControlChunk) { Timestamp validAfter{101, 0}; - StatusWith result = - ShardingCatalogManager::get(operationContext()) - ->commitChunkMigration(operationContext(), - kNamespace, - chunk0, - origVersion.epoch(), - collTimestamp, - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter); + StatusWith resultBSON = ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + chunk0, + origVersion.epoch(), + collTimestamp, + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter); - ASSERT_OK(result.getStatus()); + ASSERT_OK(resultBSON.getStatus()); // Verify the version returned matches expected value. 
- auto versions = result.getValue(); - auto mver = versions.shardVersion; + BSONObj versions = resultBSON.getValue(); + auto mver = ChunkVersion::parse(versions["shardVersion"]); ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver); // Verify that a collection version is returned - auto cver = versions.collectionVersion; + auto cver = ChunkVersion::parse(versions["collectionVersion"]); ASSERT_EQ(ChunkVersion({collEpoch, collTimestamp}, {origMajorVersion + 1, 0}), cver); // Verify the chunk ended up in the right shard. @@ -271,22 +270,21 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtlTrimHistory) { // Make the time distance between the last history element large enough. Timestamp validAfter{200, 0}; - StatusWith result = - ShardingCatalogManager::get(operationContext()) - ->commitChunkMigration(operationContext(), - kNamespace, - chunk0, - origVersion.epoch(), - collTimestamp, - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter); + StatusWith resultBSON = ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + chunk0, + origVersion.epoch(), + collTimestamp, + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter); - ASSERT_OK(result.getStatus()); + ASSERT_OK(resultBSON.getStatus()); // Verify the version returned matches expected value. - auto versions = result.getValue(); - auto mver = versions.shardVersion; + BSONObj versions = resultBSON.getValue(); + auto mver = ChunkVersion::parse(versions["shardVersion"]); ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver); // Verify the chunk ended up in the right shard. @@ -333,18 +331,17 @@ TEST_F(CommitChunkMigrate, RejectOutOfOrderHistory) { // Make the time before the last change to trigger the failure. 
Timestamp validAfter{99, 0}; - StatusWith result = - ShardingCatalogManager::get(operationContext()) - ->commitChunkMigration(operationContext(), - kNamespace, - chunk0, - origVersion.epoch(), - origVersion.getTimestamp(), - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter); - - ASSERT_EQ(ErrorCodes::IncompatibleShardingMetadata, result.getStatus()); + StatusWith resultBSON = ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + chunk0, + origVersion.epoch(), + origVersion.getTimestamp(), + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter); + + ASSERT_EQ(ErrorCodes::IncompatibleShardingMetadata, resultBSON.getStatus()); } TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { @@ -389,18 +386,17 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) { Timestamp validAfter{1}; - StatusWith result = - ShardingCatalogManager::get(operationContext()) - ->commitChunkMigration(operationContext(), - kNamespace, - chunk0, - OID::gen(), - Timestamp(52), - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter); - - ASSERT_EQ(ErrorCodes::StaleEpoch, result.getStatus()); + StatusWith resultBSON = ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + chunk0, + OID::gen(), + Timestamp(52), + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter); + + ASSERT_EQ(ErrorCodes::StaleEpoch, resultBSON.getStatus()); } TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { @@ -447,18 +443,17 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) { Timestamp validAfter{1}; - StatusWith result = - ShardingCatalogManager::get(operationContext()) - ->commitChunkMigration(operationContext(), - kNamespace, - chunk0, - origVersion.epoch(), - origVersion.getTimestamp(), - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter); - - ASSERT_EQ(ErrorCodes::StaleEpoch, result.getStatus()); + 
StatusWith resultBSON = ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + chunk0, + origVersion.epoch(), + origVersion.getTimestamp(), + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter); + + ASSERT_EQ(ErrorCodes::StaleEpoch, resultBSON.getStatus()); } TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) { @@ -508,22 +503,21 @@ TEST_F(CommitChunkMigrate, CommitWithLastChunkOnShardShouldNotAffectOtherChunks) setupCollection(kNamespace, kKeyPattern, {chunk0, chunk1}); Timestamp validAfter{101, 0}; - StatusWith result = - ShardingCatalogManager::get(operationContext()) - ->commitChunkMigration(operationContext(), - kNamespace, - chunk0, - origVersion.epoch(), - origVersion.getTimestamp(), - ShardId(shard0.getName()), - ShardId(shard1.getName()), - validAfter); - - ASSERT_OK(result.getStatus()); + StatusWith resultBSON = ShardingCatalogManager::get(operationContext()) + ->commitChunkMigration(operationContext(), + kNamespace, + chunk0, + origVersion.epoch(), + origVersion.getTimestamp(), + ShardId(shard0.getName()), + ShardId(shard1.getName()), + validAfter); + + ASSERT_OK(resultBSON.getStatus()); // Verify the versions returned match expected values. - auto versions = result.getValue(); - auto mver = versions.shardVersion; + BSONObj versions = resultBSON.getValue(); + auto mver = ChunkVersion::parse(versions["shardVersion"]); ASSERT_EQ(ChunkVersion({origVersion.epoch(), origVersion.getTimestamp()}, {0, 0}), mver); // Verify the chunks ended up in the right shards. 
@@ -643,8 +637,8 @@ TEST_F(CommitChunkMigrate, RejectOlderChunkVersion) { ShardId(shard1.getName()), validAfter); - ASSERT_NOT_OK(result.getStatus()); - ASSERT_EQ(result.getStatus(), ErrorCodes::ConflictingOperationInProgress); + ASSERT_NOT_OK(result); + ASSERT_EQ(result, ErrorCodes::ConflictingOperationInProgress); } TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) { @@ -695,8 +689,8 @@ TEST_F(CommitChunkMigrate, RejectMismatchedEpoch) { ShardId(shard1.getName()), validAfter); - ASSERT_NOT_OK(result.getStatus()); - ASSERT_EQ(result.getStatus(), ErrorCodes::StaleEpoch); + ASSERT_NOT_OK(result); + ASSERT_EQ(result, ErrorCodes::StaleEpoch); } class CommitMoveRangeTest : public CommitChunkMigrate { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index 2743b329022..39837c1aadd 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -117,11 +117,11 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) { _shardId, validAfter)); - auto collVersion = versions.collectionVersion; - auto shardVersion = versions.shardVersion; + auto collVersion = ChunkVersion::parse(versions["collectionVersion"]); + auto shardVersion = ChunkVersion::parse(versions["shardVersion"]); - ASSERT_TRUE(origVersion.isOlderThan(versions.shardVersion)); - ASSERT_EQ(shardVersion, collVersion); + ASSERT_TRUE(origVersion.isOlderThan(shardVersion)); + ASSERT_EQ(collVersion, shardVersion); // Check for increment on mergedChunk's minor version auto expectedShardVersion = @@ -196,15 +196,15 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) { Timestamp validAfter{100, 0}; - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunksMerge(operationContext(), - _nss1, - collEpoch, - collTimestamp, - collUuid, - rangeToBeMerged, - _shardId, - 
validAfter)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunksMerge(operationContext(), + _nss1, + collEpoch, + collTimestamp, + collUuid, + rangeToBeMerged, + _shardId, + validAfter)); const auto query BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( @@ -280,15 +280,15 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) { Timestamp validAfter{100, 0}; - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunksMerge(operationContext(), - _nss1, - collEpoch, - collTimestamp, - collUuid, - rangeToBeMerged, - _shardId, - validAfter)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunksMerge(operationContext(), + _nss1, + collEpoch, + collTimestamp, + collUuid, + rangeToBeMerged, + _shardId, + validAfter)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( @@ -362,15 +362,15 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) { setupCollection(_nss1, _keyPattern, {chunk, chunk2, otherChunk}); Timestamp validAfter{1}; - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunksMerge(operationContext(), - _nss1, - collEpoch, - collTimestamp, - collUuid, - rangeToBeMerged, - shardId, - validAfter)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunksMerge(operationContext(), + _nss1, + collEpoch, + collTimestamp, + collUuid, + rangeToBeMerged, + shardId, + validAfter)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( getConfigShard()->exhaustiveFindOnConfig(operationContext(), @@ -486,7 +486,7 @@ TEST_F(MergeChunkTest, NonMatchingUUIDsOfChunkAndRequestErrors) { rangeToBeMerged, _shardId, validAfter); - ASSERT_EQ(ErrorCodes::InvalidUUID, mergeStatus.getStatus()); + ASSERT_EQ(ErrorCodes::InvalidUUID, mergeStatus); } TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) { @@ -515,15 +515,15 @@ 
TEST_F(MergeChunkTest, MergeAlreadyHappenedSucceeds) { Timestamp validAfter{1}; - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunksMerge(operationContext(), - _nss1, - collEpoch, - collTimestamp, - collUuid, - rangeToBeMerged, - _shardId, - validAfter)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunksMerge(operationContext(), + _nss1, + collEpoch, + collTimestamp, + collUuid, + rangeToBeMerged, + _shardId, + validAfter)); // Verify that no change to config.chunks happened. const auto query = BSON(ChunkType::collectionUUID() << collUuid); @@ -586,15 +586,15 @@ TEST_F(MergeChunkTest, MergingChunksWithDollarPrefixShouldSucceed) { ChunkRange rangeToBeMerged(chunk1.getMin(), chunk3.getMax()); Timestamp validAfter{100, 0}; - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunksMerge(operationContext(), - _nss1, - collEpoch, - collTimestamp, - collUuid, - rangeToBeMerged, - _shardId, - validAfter)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunksMerge(operationContext(), + _nss1, + collEpoch, + collTimestamp, + collUuid, + rangeToBeMerged, + _shardId, + validAfter)); const auto query = BSON(ChunkType::collectionUUID() << collUuid); auto findResponse = uassertStatusOK( diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index 4c963a28ee9..32c5ced57b4 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -105,8 +105,8 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) { splitPoints, "shard0000", false /* fromChunkSplitter*/)); - auto collVersion = versions.collectionVersion; - auto shardVersion = versions.shardVersion; + auto collVersion = ChunkVersion::parse(versions["collectionVersion"]); + auto shardVersion = 
ChunkVersion::parse(versions["shardVersion"]); ASSERT_TRUE(origVersion.isOlderThan(shardVersion)); ASSERT_EQ(collVersion, shardVersion); @@ -181,15 +181,15 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) { setupCollection(nss, _keyPattern, {chunk}); - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunkSplit(operationContext(), - nss, - collEpoch, - collTimestamp, - ChunkRange(chunkMin, chunkMax), - splitPoints, - "shard0000", - false /* fromChunkSplitter*/)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunkSplit(operationContext(), + nss, + collEpoch, + collTimestamp, + ChunkRange(chunkMin, chunkMax), + splitPoints, + "shard0000", + false /* fromChunkSplitter*/)); // First chunkDoc should have range [chunkMin, chunkSplitPoint] auto chunkDocStatus = @@ -278,15 +278,15 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) { setupCollection(nss, _keyPattern, {chunk, chunk2}); - uassertStatusOK(ShardingCatalogManager::get(operationContext()) - ->commitChunkSplit(operationContext(), - nss, - collEpoch, - collTimestamp, - ChunkRange(chunkMin, chunkMax), - splitPoints, - "shard0000", - false /* fromChunkSplitter*/)); + ASSERT_OK(ShardingCatalogManager::get(operationContext()) + ->commitChunkSplit(operationContext(), + nss, + collEpoch, + collTimestamp, + ChunkRange(chunkMin, chunkMax), + splitPoints, + "shard0000", + false /* fromChunkSplitter*/)); // First chunkDoc should have range [chunkMin, chunkSplitPoint] auto chunkDocStatus = @@ -420,7 +420,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) { splitPoints, "shard0000", false /* fromChunkSplitter*/); - ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus.getStatus()); + ASSERT_EQ(ErrorCodes::StaleEpoch, splitStatus); }; test(_nss2, Timestamp(42)); diff --git a/src/mongo/db/s/config_server_op_observer.h b/src/mongo/db/s/config_server_op_observer.h index 64411e8c2a4..9a05b36c19a 100644 --- 
a/src/mongo/db/s/config_server_op_observer.h +++ b/src/mongo/db/s/config_server_op_observer.h @@ -57,8 +57,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -219,6 +218,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override { return nullptr; diff --git a/src/mongo/db/s/configure_query_analyzer_cmd.cpp b/src/mongo/db/s/configure_query_analyzer_cmd.cpp index 73f8441c1db..34a4274eea9 100644 --- a/src/mongo/db/s/configure_query_analyzer_cmd.cpp +++ b/src/mongo/db/s/configure_query_analyzer_cmd.cpp @@ -33,10 +33,7 @@ #include "mongo/db/commands.h" #include "mongo/db/db_raii.h" #include "mongo/db/list_collections_gen.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/persistent_task_store.h" #include "mongo/logv2/log.h" -#include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/analyze_shard_key_feature_flag_gen.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/configure_query_analyzer_cmd_gen.h" @@ -49,6 +46,63 @@ namespace mongo { namespace { +void validateCommandOptions(OperationContext* opCtx, + const NamespaceString& nss, + QueryAnalyzerModeEnum mode, + boost::optional sampleRate) { + uassert(ErrorCodes::InvalidOptions, + "Cannot specify 'sampleRate' when 'mode' is \"off\"", + mode != QueryAnalyzerModeEnum::kOff || !sampleRate); + + uassert(ErrorCodes::InvalidOptions, + str::stream() << "'sampleRate' must be greater than 0", + !sampleRate || (*sampleRate > 0)); + + if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { + auto dbInfo = + uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db())); + + ListCollections 
listCollections; + listCollections.setDbName(nss.db()); + listCollections.setFilter(BSON("name" << nss.coll())); + + auto cmdResponse = executeCommandAgainstDatabasePrimary( + opCtx, + nss.db(), + dbInfo, + CommandHelpers::filterCommandRequestForPassthrough(listCollections.toBSON({})), + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + Shard::RetryPolicy::kIdempotent); + auto remoteResponse = uassertStatusOK(cmdResponse.swResponse); + uassertStatusOK(getStatusFromCommandResult(remoteResponse.data)); + + auto firstBatch = remoteResponse.data.firstElement()["firstBatch"].Obj(); + BSONObjIterator it(firstBatch); + + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Cannot analyze queries for a non-existing collection", + it.more()); + + auto collection = it.next().Obj().getOwned(); + uassert(ErrorCodes::CommandNotSupportedOnView, + "Cannot analyze queries for a view", + collection.getStringField("type") != "view"); + + uassert(6875000, + str::stream() << "Found multiple collections with the same name '" << nss << "'", + !it.more()); + } else { + uassert(ErrorCodes::CommandNotSupportedOnView, + "Cannot analyze queries for a view", + !CollectionCatalog::get(opCtx)->lookupView(opCtx, nss)); + + AutoGetCollectionForReadCommand collection(opCtx, nss); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Cannot analyze queries for a non-existing collection", + collection); + } +} + class ConfigureQueryAnalyzerCmd : public TypedCommand { public: using Request = ConfigureQueryAnalyzer; @@ -66,90 +120,15 @@ public: const auto& nss = ns(); const auto mode = request().getMode(); const auto sampleRate = request().getSampleRate(); - uassert(ErrorCodes::InvalidOptions, - "Cannot specify 'sampleRate' when 'mode' is \"off\"", - mode != QueryAnalyzerModeEnum::kOff || !sampleRate); - uassert(ErrorCodes::InvalidOptions, - str::stream() << "'sampleRate' must be greater than 0", - mode != QueryAnalyzerModeEnum::kFull || (sampleRate && *sampleRate > 0)); - - auto 
newConfig = request().getConfiguration(); - - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { - auto dbInfo = - uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db())); - - ListCollections listCollections; - listCollections.setDbName(nss.db()); - listCollections.setFilter(BSON("name" << nss.coll())); - - auto cmdResponse = executeCommandAgainstDatabasePrimary( - opCtx, - nss.db(), - dbInfo, - CommandHelpers::filterCommandRequestForPassthrough(listCollections.toBSON({})), - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - Shard::RetryPolicy::kIdempotent); - auto remoteResponse = uassertStatusOK(cmdResponse.swResponse); - uassertStatusOK(getStatusFromCommandResult(remoteResponse.data)); - - auto firstBatch = remoteResponse.data.firstElement()["firstBatch"].Obj(); - BSONObjIterator it(firstBatch); - - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Cannot analyze queries for a non-existing collection", - it.more()); - - auto doc = it.next().Obj().getOwned(); - - uassert(ErrorCodes::CommandNotSupportedOnView, - "Cannot analyze queries for a view", - doc.getStringField("type") != "view"); - uassert(6875000, - str::stream() - << "Found multiple collections with the same name '" << nss << "'", - !it.more()); - - auto listCollRepItem = ListCollectionsReplyItem::parse( - IDLParserContext("ListCollectionsReplyItem"), doc); - auto info = listCollRepItem.getInfo(); - invariant(info); - auto uuid = info->getUuid(); - - QueryAnalyzerDocument qad; - qad.setNs(nss); - qad.setCollectionUuid(*uuid); - qad.setConfiguration(newConfig); - // TODO SERVER-69804: Implement start/stop timestamp in config.queryAnalyzers - // document. 
- LOGV2(6915001, - "Persisting query analyzer configuration", - "nss"_attr = nss, - "collectionUuid"_attr = uuid, - "mode"_attr = mode, - "sampleRate"_attr = sampleRate); - PersistentTaskStore store{ - NamespaceString::kConfigQueryAnalyzersNamespace}; - store.upsert(opCtx, - BSON(QueryAnalyzerDocument::kCollectionUuidFieldName - << qad.getCollectionUuid()), - qad.toBSON(), - WriteConcerns::kMajorityWriteConcernNoTimeout); - } else { - uassert(ErrorCodes::CommandNotSupportedOnView, - "Cannot analyze queries for a view", - !CollectionCatalog::get(opCtx)->lookupView(opCtx, nss)); - - AutoGetCollectionForReadCommand collection(opCtx, nss); - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Cannot analyze queries for a non-existing collection", - collection); - } - - Response response; - // TODO SERVER-70019: Make configQueryAnalyzer return old configuration. - response.setNewConfiguration(newConfig); - return response; + validateCommandOptions(opCtx, nss, mode, sampleRate); + + LOGV2(6875002, + "Configuring query analysis", + "nss"_attr = nss, + "mode"_attr = mode, + "sampleRate"_attr = sampleRate); + + return {}; } private: diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp index 449d800368a..8c95d2074de 100644 --- a/src/mongo/db/s/drop_collection_coordinator.cpp +++ b/src/mongo/db/s/drop_collection_coordinator.cpp @@ -182,8 +182,6 @@ ExecutorFuture DropCollectionCoordinator::_runImpl( "namespace"_attr = nss(), "sharded"_attr = collIsSharded); - sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss(), boost::none); - if (collIsSharded) { invariant(_doc.getCollInfo()); const auto& coll = _doc.getCollInfo().value(); diff --git a/src/mongo/db/s/drop_collection_coordinator_document.idl b/src/mongo/db/s/drop_collection_coordinator_document.idl index 4c71ec33d5c..a0388729cac 100644 --- a/src/mongo/db/s/drop_collection_coordinator_document.idl +++ 
b/src/mongo/db/s/drop_collection_coordinator_document.idl @@ -72,6 +72,6 @@ structs: optional: true collectionUUID: type: uuid - description: "The expected UUID of the collection, only set and used in C2C replication." + description: "The expected UUID of the collection." optional: true diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp index a492cee54de..349d9ae4314 100644 --- a/src/mongo/db/s/drop_database_coordinator.cpp +++ b/src/mongo/db/s/drop_database_coordinator.cpp @@ -130,9 +130,6 @@ void DropDatabaseCoordinator::_dropShardedCollection( sharding_ddl_util::removeCollAndChunksMetadataFromConfig( opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern); - // Remove collection's query analyzer configuration document, if it exists. - sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss, coll.getUuid()); - _updateSession(opCtx); sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss, getCurrentSession()); diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp index 0fc2ec08d38..be96e307fe2 100644 --- a/src/mongo/db/s/flush_database_cache_updates_command.cpp +++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp @@ -176,7 +176,7 @@ public: const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquire( opCtx, ns().dbName(), DSSAcquisitionMode::kShared); criticalSectionSignal = - scopedDss->getCriticalSectionSignal(ShardingMigrationCriticalSection::kWrite); + scopedDss->getCriticalSectionSignal(ShardingMigrationCriticalSection::kRead); } if (criticalSectionSignal) diff --git a/src/mongo/db/s/range_deleter_service.cpp b/src/mongo/db/s/range_deleter_service.cpp index 7b85e07e68f..170e32ec944 100644 --- a/src/mongo/db/s/range_deleter_service.cpp +++ b/src/mongo/db/s/range_deleter_service.cpp @@ -248,9 +248,9 @@ void RangeDeleterService::onStepUpComplete(OperationContext* opCtx, long long te } auto lock = 
_acquireMutexUnconditionally(); - dassert(_state == kDown, "Service expected to be down before stepping up"); + dassert(_state.load() == kDown, "Service expected to be down before stepping up"); - _state = kInitializing; + _state.store(kInitializing); if (_executor) { // Join previously shutted down executor before reinstantiating it @@ -279,7 +279,7 @@ void RangeDeleterService::onStepUpComplete(OperationContext* opCtx, long long te void RangeDeleterService::_recoverRangeDeletionsOnStepUp(OperationContext* opCtx) { if (disableResumableRangeDeleter.load()) { - _state = kDown; + _state.store(kDown); return; } @@ -358,8 +358,8 @@ void RangeDeleterService::_recoverRangeDeletionsOnStepUp(OperationContext* opCtx auto lock = _acquireMutexUnconditionally(); // Since the recovery is only spawned on step-up but may complete later, it's not // assumable that the node is still primary when the all resubmissions finish - if (_state != kDown) { - this->_state = kUp; + if (_state.load() != kDown) { + this->_state.store(kUp); } }) .semi(); @@ -390,7 +390,7 @@ void RangeDeleterService::_stopService(bool joinExecutor) { // Clear range deletion tasks map in order to notify potential waiters on completion futures _rangeDeletionTasks.clear(); - _state = kDown; + _state.store(kDown); } void RangeDeleterService::onStepDown() { @@ -439,47 +439,75 @@ SharedSemiFuture RangeDeleterService::registerTask( .share(); } - auto scheduleRangeDeletionChain = [&]() { - // Step 1: wait for ongoing queries retaining the range to drain - (void)std::move(waitForActiveQueriesToComplete) - .thenRunOn(_executor) - .then([this, when = rdt.getWhenToClean()]() { - // Step 2: schedule wait for secondaries orphans cleanup delay - const auto delayForActiveQueriesOnSecondariesToComplete = - when == CleanWhenEnum::kDelayed ? 
Seconds(orphanCleanupDelaySecs.load()) - : Seconds(0); - - return sleepUntil(_executor, - _executor->now() + delayForActiveQueriesOnSecondariesToComplete) - .share(); - }) - .then([this, rdt = rdt]() { - // Step 3: schedule the actual range deletion task - auto lock = _acquireMutexUnconditionally(); - invariant( - _readyRangeDeletionsProcessorPtr || _state == kDown, - "The range deletions processor must be instantiated if the state != kDown"); - if (_state != kDown) { - _readyRangeDeletionsProcessorPtr->emplaceRangeDeletion(rdt); - } - }); - }; - - auto lock = - fromResubmitOnStepUp ? _acquireMutexUnconditionally() : _acquireMutexFailIfServiceNotUp(); - auto [registeredTask, firstRegistration] = - _rangeDeletionTasks[rdt.getCollectionUuid()].insert(std::make_shared(rdt)); + // Block the scheduling of the task while populating internal data structures + SharedPromise blockUntilRegistered; + + (void)blockUntilRegistered.getFuture() + .semi() + .thenRunOn(_executor) + .onError([serializedTask = rdt.toBSON()](Status errStatus) { + // The above futures can only fail with those specific codes (futures notifying + // the end of ongoing queries on a range will never be set to an error): + // - 67635: the task was already previously scheduled + // - BrokenPromise: the executor is shutting down + // - Cancellation error: the node is shutting down or a stepdown happened + if (errStatus.code() != 67635 && errStatus != ErrorCodes::BrokenPromise && + !ErrorCodes::isCancellationError(errStatus)) { + LOGV2_ERROR(6784800, + "Range deletion scheduling failed with unexpected error", + "error"_attr = errStatus, + "rangeDeletion"_attr = serializedTask); + } + return errStatus; + }) + .then([waitForOngoingQueries = std::move(waitForActiveQueriesToComplete).share()]() { + // Step 1: wait for ongoing queries retaining the range to drain + return waitForOngoingQueries; + }) + .then([this, when = rdt.getWhenToClean()]() { + // Step 2: schedule wait for secondaries orphans cleanup delay + 
const auto delayForActiveQueriesOnSecondariesToComplete = + when == CleanWhenEnum::kDelayed ? Seconds(orphanCleanupDelaySecs.load()) + : Seconds(0); + + return sleepUntil(_executor, + _executor->now() + delayForActiveQueriesOnSecondariesToComplete) + .share(); + }) + .then([this, rdt = rdt]() { + // Step 3: schedule the actual range deletion task + auto lock = _acquireMutexUnconditionally(); + invariant(_readyRangeDeletionsProcessorPtr || _state.load() == kDown, + "The range deletions processor must be instantiated if the state != kDown"); + if (_state.load() != kDown) { + _readyRangeDeletionsProcessorPtr->emplaceRangeDeletion(rdt); + } + }); + + auto [taskCompletionFuture, inserted] = [&]() -> std::pair, bool> { + auto lock = fromResubmitOnStepUp ? _acquireMutexUnconditionally() + : _acquireMutexFailIfServiceNotUp(); + auto [registeredTask, inserted] = _rangeDeletionTasks[rdt.getCollectionUuid()].insert( + std::make_shared(rdt)); + auto retFuture = static_cast(registeredTask->get())->getCompletionFuture(); + return {retFuture, inserted}; + }(); - if (firstRegistration) { - scheduleRangeDeletionChain(); + if (inserted) { + // The range deletion task has been registered, so the chain execution can be unblocked + blockUntilRegistered.setFrom(Status::OK()); } else { + // Tried to register a duplicate range deletion task: invalidate the chain + auto errStatus = + Status(ErrorCodes::Error(67635), "Not scheduling duplicated range deletion"); LOGV2_WARNING(6804200, "Tried to register duplicate range deletion task. 
This results in a no-op.", "collectionUUID"_attr = rdt.getCollectionUuid(), "range"_attr = rdt.getRange()); + blockUntilRegistered.setFrom(errStatus); } - return static_cast(registeredTask->get())->getCompletionFuture(); + return taskCompletionFuture; } void RangeDeleterService::deregisterTask(const UUID& collUUID, const ChunkRange& range) { diff --git a/src/mongo/db/s/range_deleter_service.h b/src/mongo/db/s/range_deleter_service.h index 2b8293805e8..2ac88ffc07e 100644 --- a/src/mongo/db/s/range_deleter_service.h +++ b/src/mongo/db/s/range_deleter_service.h @@ -175,7 +175,7 @@ private: enum State { kInitializing, kUp, kDown }; - State _state{kDown}; + AtomicWord _state{kDown}; // Future markes as ready when the state changes to "up" SemiFuture _stepUpCompletedFuture; @@ -183,7 +183,8 @@ private: /* Acquire mutex only if service is up (for "user" operation) */ [[nodiscard]] stdx::unique_lock _acquireMutexFailIfServiceNotUp() { stdx::unique_lock lg(_mutex_DO_NOT_USE_DIRECTLY); - uassert(ErrorCodes::NotYetInitialized, "Range deleter service not up", _state == kUp); + uassert( + ErrorCodes::NotYetInitialized, "Range deleter service not up", _state.load() == kUp); return lg; } diff --git a/src/mongo/db/s/range_deleter_service_op_observer.h b/src/mongo/db/s/range_deleter_service_op_observer.h index 430535ceac5..1ba16b3f698 100644 --- a/src/mongo/db/s/range_deleter_service_op_observer.h +++ b/src/mongo/db/s/range_deleter_service_op_observer.h @@ -88,8 +88,7 @@ private: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -223,6 +222,7 @@ private: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override { return nullptr; diff --git 
a/src/mongo/db/s/range_deleter_service_test.cpp b/src/mongo/db/s/range_deleter_service_test.cpp index 5d6b49b7587..89ac2f45bad 100644 --- a/src/mongo/db/s/range_deleter_service_test.cpp +++ b/src/mongo/db/s/range_deleter_service_test.cpp @@ -35,6 +35,7 @@ #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/range_deletion_util.h" namespace mongo { @@ -851,4 +852,42 @@ TEST_F(RangeDeleterServiceTest, GetOverlappingRangeDeletionsWithNonContiguousTas ASSERT_OK(futureReadyWhenTask30Ready.getNoThrow(opCtx)); } +TEST_F(RangeDeleterServiceTest, AddKeyPatternFieldIfMissing) { + DBDirectClient dbclient(opCtx); + + // Insert range deletion tasks without keyPattern field + RangeDeletionTask rdt1 = createRangeDeletionTask( + uuidCollA, BSON(kShardKey << 0), BSON(kShardKey << 10), CleanWhenEnum::kDelayed); + RangeDeletionTask rdt2 = createRangeDeletionTask( + uuidCollA, BSON(kShardKey << 10), BSON(kShardKey << 20), CleanWhenEnum::kDelayed); + + RangeDeletionTask rdt3 = createRangeDeletionTask( + uuidCollB, BSON(kShardKey << 0), BSON(kShardKey << 10), CleanWhenEnum::kDelayed); + RangeDeletionTask rdt4 = createRangeDeletionTask( + uuidCollB, BSON(kShardKey << 10), BSON(kShardKey << 20), CleanWhenEnum::kDelayed); + + insertRangeDeletionTaskDocument(opCtx, rdt1); + insertRangeDeletionTaskDocument(opCtx, rdt2); + insertRangeDeletionTaskDocument(opCtx, rdt3); + insertRangeDeletionTaskDocument(opCtx, rdt4); + + { + auto cursor = dbclient.find(FindCommandRequest(NamespaceString::kRangeDeletionNamespace)); + while (cursor->more()) { + BSONObj doc = cursor->next(); + ASSERT(!doc.hasField(RangeDeletionTask::kKeyPatternFieldName)); + } + } + + addKeyPatternFieldIfMissing(opCtx); + + { + auto cursor = dbclient.find(FindCommandRequest(NamespaceString::kRangeDeletionNamespace)); + while (cursor->more()) { + BSONObj doc = cursor->next(); + 
ASSERT(doc.hasField(RangeDeletionTask::kKeyPatternFieldName)); + } + } +} + } // namespace mongo diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp index aeda613b823..a594a5f8121 100644 --- a/src/mongo/db/s/range_deletion_util.cpp +++ b/src/mongo/db/s/range_deletion_util.cpp @@ -43,6 +43,7 @@ #include "mongo/db/keypattern.h" #include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/query/internal_plans.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_knobs_gen.h" @@ -60,6 +61,8 @@ #include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/grid.h" #include "mongo/util/cancellation.h" #include "mongo/util/future_util.h" @@ -632,4 +635,75 @@ void removePersistentRangeDeletionTasksByUUID(OperationContext* opCtx, const UUI uassertStatusOK(getStatusFromWriteCommandReply(commandReply)); } +boost::optional getKeyPatternFromConfigSvr(OperationContext* opCtx, + const UUID& collectionUUID) { + + auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + auto query = collectionUUID.toBSON(); + const auto response = uassertStatusOK( + configShard->exhaustiveFindOnConfig(opCtx, + ReadPreferenceSetting(ReadPreference::Nearest), + repl::ReadConcernLevel::kMajorityReadConcern, + CollectionType::ConfigNS, + query, + BSONObj(), + boost::none)); + + // Collection may not exist anymore for a range deletion task, in this case the persistent + // document will be removed after being processed by the range deleter + if (response.docs.empty()) { + return boost::none; + } + + auto collDoc = response.docs.front(); + if (!collDoc.hasField(CollectionType::kKeyPatternFieldName)) { + return boost::none; + } + return 
KeyPattern::fromBSON(collDoc.getField(CollectionType::kKeyPatternFieldName).Obj()); +} + +void addKeyPatternFieldIfMissing(OperationContext* opCtx) { + DBDirectClient dbClient(opCtx); + + // 1. Get the list of collectionUUID with at least one existing range deletion missing the + // `keyPattern` field + std::vector pipeline; + pipeline.push_back(BSON( + "$match" << BSON(RangeDeletionTask::kKeyPatternFieldName << BSON("$exists" << false)))); + pipeline.push_back( + BSON("$group" << BSON("_id" + << "$" + RangeDeletionTask::kCollectionUuidFieldName))); + + AggregateCommandRequest aggRequest(NamespaceString::kRangeDeletionNamespace, pipeline); + auto cursor = uassertStatusOKWithContext( + DBClientCursor::fromAggregationRequest( + &dbClient, aggRequest, false /* secondaryOk */, true /* useExhaust */), + "Failed to establish a cursor for aggregation"); + + // 2. Set the `keyPattern` field for every returned collectionUuid + while (cursor->more()) { + BSONObj doc = cursor->next(); + UUID collUuid = UUID(uassertStatusOK(UUID::parse(doc.getField("_id")))); + BSONObj query = BSON(RangeDeletionTask::kCollectionUuidFieldName + << collUuid << RangeDeletionTask::kKeyPatternFieldName + << BSON("$exists" << false)); + + auto keyPattern = getKeyPatternFromConfigSvr(opCtx, collUuid); + if (keyPattern) { + auto commandResponse = write_ops::checkWriteErrors(dbClient.update([&] { + write_ops::UpdateCommandRequest updateOp(NamespaceString::kRangeDeletionNamespace); + BSONObj update = BSON("$set" << BSON(RangeDeletionTask::kKeyPatternFieldName + << (*keyPattern).toBSON())); + auto updateModification = + write_ops::UpdateModification::parseFromClassicUpdate(update); + write_ops::UpdateOpEntry updateEntry(query, updateModification); + updateEntry.setMulti(true); + updateEntry.setUpsert(false); + updateOp.setUpdates({updateEntry}); + return updateOp; + }())); + } + } +} + } // namespace mongo diff --git a/src/mongo/db/s/range_deletion_util.h b/src/mongo/db/s/range_deletion_util.h index 
1042137a184..de2717428d4 100644 --- a/src/mongo/db/s/range_deletion_util.h +++ b/src/mongo/db/s/range_deletion_util.h @@ -159,4 +159,6 @@ auto withTemporaryOperationContext(Callable&& callable, return callable(opCtx); } +void addKeyPatternFieldIfMissing(OperationContext* opCtx); + } // namespace mongo diff --git a/src/mongo/db/s/resharding/resharding_op_observer.h b/src/mongo/db/s/resharding/resharding_op_observer.h index de3a84ded6b..73e2ea4c9b7 100644 --- a/src/mongo/db/s/resharding/resharding_op_observer.h +++ b/src/mongo/db/s/resharding/resharding_op_observer.h @@ -72,8 +72,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -239,6 +238,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override { return nullptr; diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp index b5174ae65bb..480c90b2e41 100644 --- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp +++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp @@ -72,7 +72,7 @@ bool joinDbVersionOperation( invariant(scopedDss->has_value()); if (auto critSect = - (**scopedDss)->getCriticalSectionSignal(ShardingMigrationCriticalSection::kWrite)) { + (**scopedDss)->getCriticalSectionSignal(ShardingMigrationCriticalSection::kRead)) { LOGV2_DEBUG(6697201, 2, "Waiting for exit from the critical section", diff --git a/src/mongo/db/s/shard_server_op_observer.h b/src/mongo/db/s/shard_server_op_observer.h index 3c1db831e7d..b5e8580f9e0 100644 --- a/src/mongo/db/s/shard_server_op_observer.h +++ b/src/mongo/db/s/shard_server_op_observer.h @@ -56,8 +56,7 @@ public: void 
onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -218,6 +217,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override { return nullptr; diff --git a/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp b/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp index d455672d8b8..4220987da6d 100644 --- a/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp +++ b/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp @@ -66,8 +66,6 @@ std::vector generateConfigShardSampleData(int nShards) { std::vector getPlacementDataSample() { std::vector placementDataSample = {}; - const auto coll1Uuid = UUID::gen(); - const auto coll2Uuid = UUID::gen(); // create database mock placementDataSample.push_back(BSON("_id' " << 1 << "nss" << "mock" @@ -76,24 +74,21 @@ std::vector getPlacementDataSample() { // shard collection mock.collection1 placementDataSample.push_back(BSON("_id' " << 2 << "nss" << "mock.collection1" - << "uuid" << coll1Uuid << "timestamp" - << Timestamp(2, 0) << "shards" + << "timestamp" << Timestamp(2, 0) << "shards" << BSON_ARRAY("shard1" << "shard2" << "shard3"))); // shard collection mock.collection2 placementDataSample.push_back(BSON("_id' " << 3 << "nss" << "mock.collection2" - << "uuid" << coll2Uuid << "timestamp" - << Timestamp(3, 0) << "shards" + << "timestamp" << Timestamp(3, 0) << "shards" << BSON_ARRAY("shard1" << "shard2" << "shard3"))); // drop collection2 placementDataSample.push_back(BSON("_id' " << 4 << "nss" << "mock.collection2" - << "uuid" << coll2Uuid << "timestamp" - << Timestamp(4, 0) << "shards" + << "timestamp" << Timestamp(4, 0) << "shards" << 
BSONArrayBuilder().arr())); // move primary from shard1 to shard2 placementDataSample.push_back(BSON("_id' " << 5 << "nss" @@ -103,8 +98,7 @@ std::vector getPlacementDataSample() { // move last chunk of collection 1 located in shard1 to shard4 placementDataSample.push_back(BSON("_id' " << 6 << "nss" << "mock.collection1" - << "uuid" << coll1Uuid << "timestamp" - << Timestamp(6, 0) << "shards" + << "timestamp" << Timestamp(6, 0) << "shards" << BSON_ARRAY("shard2" << "shard3" << "shard4"))); diff --git a/src/mongo/db/s/sharding_ddl_coordinator.h b/src/mongo/db/s/sharding_ddl_coordinator.h index c404df4445b..d8f42fc6ea5 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator.h +++ b/src/mongo/db/s/sharding_ddl_coordinator.h @@ -188,13 +188,7 @@ protected: const BSONObj& initialStateDoc) : ShardingDDLCoordinator(service, initialStateDoc), _coordinatorName(name), - /* - * Force a deserialisation + serialisation of the initialStateDoc to ensure that - * _initialState is a full deep copy of the received parameter. 
- */ - _initialState( - StateDoc::parse(IDLParserContext("CoordinatorInitialState"), initialStateDoc) - .toBSON()), + _initialState(initialStateDoc.getOwned()), _doc(StateDoc::parse(IDLParserContext("CoordinatorDocument"), _initialState)) {} ShardingDDLCoordinatorMetadata const& metadata() const override { diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp index 37c0de48493..5beca502d6a 100644 --- a/src/mongo/db/s/sharding_ddl_util.cpp +++ b/src/mongo/db/s/sharding_ddl_util.cpp @@ -34,7 +34,6 @@ #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/namespace_string.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/remove_tags_gen.h" @@ -45,7 +44,6 @@ #include "mongo/db/write_block_bypass.h" #include "mongo/logv2/log.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" -#include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_tags.h" @@ -315,40 +313,6 @@ void removeTagsMetadataFromConfig(OperationContext* opCtx, str::stream() << "Error removing tags for collection " << nss.toString()); } -void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid) { - auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - write_ops::DeleteCommandRequest deleteCmd(NamespaceString::kConfigQueryAnalyzersNamespace); - if (uuid) { - deleteCmd.setDeletes({[&] { - write_ops::DeleteOpEntry entry; - entry.setQ(BSON(QueryAnalyzerDocument::kCollectionUuidFieldName << uuid->toString())); - entry.setMulti(false); - return entry; - }()}); - } else { - deleteCmd.setDeletes({[&] { - write_ops::DeleteOpEntry entry; - entry.setQ(BSON(QueryAnalyzerDocument::kNsFieldName << nss.toString())); - 
entry.setMulti(true); - return entry; - }()}); - } - - const auto deleteResult = configShard->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - NamespaceString::kConfigDb.toString(), - CommandHelpers::appendMajorityWriteConcern(deleteCmd.toBSON({})), - Shard::RetryPolicy::kIdempotent); - - uassertStatusOKWithContext(Shard::CommandResponse::getEffectiveStatus(std::move(deleteResult)), - str::stream() - << "Error removing query analyzer configurations for collection " - << nss.toString()); -} - void removeTagsMetadataFromConfig_notIdempotent(OperationContext* opCtx, const NamespaceString& nss, const WriteConcernOptions& writeConcern) { @@ -639,5 +603,6 @@ BSONObj getCriticalSectionReasonForRename(const NamespaceString& from, const Nam << "rename" << "from" << from.toString() << "to" << to.toString()); } + } // namespace sharding_ddl_util } // namespace mongo diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h index d65998494a7..157a65399a1 100644 --- a/src/mongo/db/s/sharding_ddl_util.h +++ b/src/mongo/db/s/sharding_ddl_util.h @@ -90,13 +90,6 @@ bool removeCollAndChunksMetadataFromConfig_notIdempotent(OperationContext* opCtx const NamespaceString& nss, const WriteConcernOptions& writeConcern); -/** - * Delete the config query analyzer document for the given collection, if it exists. - */ -void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid); - /** * Rename sharded collection metadata as part of a renameCollection operation. 
* diff --git a/src/mongo/db/s/shardsvr_create_global_index_command.cpp b/src/mongo/db/s/shardsvr_create_global_index_command.cpp index 0ed41877b14..cdf86c95e60 100644 --- a/src/mongo/db/s/shardsvr_create_global_index_command.cpp +++ b/src/mongo/db/s/shardsvr_create_global_index_command.cpp @@ -84,10 +84,6 @@ public: } void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "Global indexes are not enabled.", - gFeatureFlagGlobalIndexes.isEnabled(serverGlobalParams.featureCompatibility)); - const auto indexUUID = request().getCommandParameter(); global_index::createContainer(opCtx, indexUUID); } diff --git a/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp b/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp index 8fc4d6cc25d..f7e384724ee 100644 --- a/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp +++ b/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp @@ -56,10 +56,6 @@ public: using InvocationBase::InvocationBase; void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "Global indexes are not enabled.", - gFeatureFlagGlobalIndexes.isEnabled(serverGlobalParams.featureCompatibility)); - uassert(6924200, "_shardsvrDeleteGlobalIndexKey must run inside a multi-doc transaction.", opCtx->inMultiDocumentTransaction()); diff --git a/src/mongo/db/s/shardsvr_drop_global_index_command.cpp b/src/mongo/db/s/shardsvr_drop_global_index_command.cpp index c55526cb16a..8b3dff6fcd9 100644 --- a/src/mongo/db/s/shardsvr_drop_global_index_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_global_index_command.cpp @@ -83,10 +83,6 @@ public: } void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "Global indexes are not enabled.", - gFeatureFlagGlobalIndexes.isEnabled(serverGlobalParams.featureCompatibility)); - const auto indexUUID = request().getCommandParameter(); global_index::dropContainer(opCtx, indexUUID); } diff --git 
a/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp b/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp index 68528125b36..6efc577d432 100644 --- a/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp +++ b/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp @@ -56,10 +56,6 @@ public: using InvocationBase::InvocationBase; void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "Global indexes are not enabled.", - gFeatureFlagGlobalIndexes.isEnabled(serverGlobalParams.featureCompatibility)); - uassert(6789400, "_shardsvrInsertGlobalIndexKey must run inside a multi-doc transaction.", opCtx->inMultiDocumentTransaction()); diff --git a/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp b/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp index 77536e7773f..fc9d7199f0a 100644 --- a/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp +++ b/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp @@ -81,10 +81,6 @@ public: }; void ShardsvrWriteGlobalIndexKeysCmd::Invocation::typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "Global indexes are not enabled.", - gFeatureFlagGlobalIndexes.isEnabled(serverGlobalParams.featureCompatibility)); - uassert(6789500, "_shardsvrWriteGlobalIndexKeys must run inside a multi-doc transaction.", opCtx->inMultiDocumentTransaction()); diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp index 09c8897ae08..6292abe4f2c 100644 --- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp +++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp @@ -285,7 +285,7 @@ public: // (in all cases except the one where this command aborts the local participant), so // ensure waiting for the client's writeConcern of the decision. 
repl::ReplClientInfo::forClient(opCtx->getClient()) - .setLastOpToSystemLastOpTimeIgnoringCtxInterrupted(opCtx); + .setLastOpToSystemLastOpTimeIgnoringShutdownCtxCancelled(opCtx); }); if (coordinatorDecisionFuture) { diff --git a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp index d81d36b28c7..c354c46f517 100644 --- a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp +++ b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp @@ -68,9 +68,9 @@ public: } // No auth needed because it only works when enabled via command line. - Status checkAuthForOperation(OperationContext*, - const DatabaseName&, - const BSONObj&) const override { + Status checkAuthForOperation(OperationContext* opCtx, + const std::string& dbname, + const BSONObj& cmdObj) const override { return Status::OK(); } diff --git a/src/mongo/db/serverless/SConscript b/src/mongo/db/serverless/SConscript index 82143897663..3ccfd8ea7f7 100644 --- a/src/mongo/db/serverless/SConscript +++ b/src/mongo/db/serverless/SConscript @@ -56,21 +56,6 @@ env.Library( ], ) -env.Library( - target='serverless_lock', - source=[ - 'serverless_operation_lock_registry.cpp', - 'serverless_server_status.cpp', - ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/dbdirectclient', - '$BUILD_DIR/mongo/db/repl/tenant_migration_state_machine_idl', - '$BUILD_DIR/mongo/db/repl/tenant_migration_utils', - '$BUILD_DIR/mongo/db/server_base', - 'shard_split_state_machine', - ], -) - env.Library( target='shard_split_donor_service', source=[ @@ -92,7 +77,6 @@ env.Library( '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/shard_role', - 'serverless_lock', 'shard_split_utils', ], ) @@ -100,7 +84,6 @@ env.Library( env.CppUnitTest( target='db_serverless_test', source=[ - 'serverless_operation_lock_registry_test.cpp', 'shard_split_donor_op_observer_test.cpp', 'shard_split_donor_service_test.cpp', 
'shard_split_utils_test.cpp', @@ -114,7 +97,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/dbtests/mocklib', - 'serverless_lock', 'shard_split_donor_service', 'shard_split_utils', ], diff --git a/src/mongo/db/serverless/serverless_operation_lock_registry.cpp b/src/mongo/db/serverless/serverless_operation_lock_registry.cpp deleted file mode 100644 index 20a02c6cd15..00000000000 --- a/src/mongo/db/serverless/serverless_operation_lock_registry.cpp +++ /dev/null @@ -1,192 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/serverless/serverless_operation_lock_registry.h" -#include "mongo/db/persistent_task_store.h" -#include "mongo/db/repl/tenant_migration_state_machine_gen.h" -#include "mongo/db/serverless/shard_split_state_machine_gen.h" -#include "mongo/logv2/log.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration - -// Failpoint that will cause recoverLocks to return early. -MONGO_FAIL_POINT_DEFINE(skipRecoverServerlessOperationLock); -namespace mongo { - -const ServiceContext::Decoration - ServerlessOperationLockRegistry::get = - ServiceContext::declareDecoration(); - -void ServerlessOperationLockRegistry::acquireLock( - ServerlessOperationLockRegistry::LockType lockType, const UUID& operationId) { - stdx::lock_guard lg(_mutex); - - // Verify there is no serverless operation in progress or it is the same type as the one - // acquiring the lock. - uassert(ErrorCodes::ConflictingServerlessOperation, - "Conflicting serverless operation in progress", - !_activeLockType || _activeLockType.get() == lockType); - invariant(_activeOperations.find(operationId) == _activeOperations.end(), - "Cannot acquire the serverless lock twice for the same operationId."); - _activeLockType = lockType; - - _activeOperations.emplace(operationId); - - LOGV2(6531500, - "Acquired serverless operation lock", - "type"_attr = lockType, - "id"_attr = operationId); -} - -void ServerlessOperationLockRegistry::releaseLock( - ServerlessOperationLockRegistry::LockType lockType, const UUID& operationId) { - stdx::lock_guard lg(_mutex); - - invariant(_activeLockType && *_activeLockType == lockType, - "Cannot release a serverless lock that is not owned by the given lock type."); - - invariant(_activeOperations.find(operationId) != _activeOperations.end(), - "Cannot release a serverless lock if the given operationId does not own the lock."); - _activeOperations.erase(operationId); - - if (_activeOperations.empty()) { - _activeLockType.reset(); - } - - 
LOGV2(6531501, - "Released serverless operation lock", - "type"_attr = lockType, - "id"_attr = operationId); -} - -void ServerlessOperationLockRegistry::onDropStateCollection(LockType lockType) { - stdx::lock_guard lg(_mutex); - - if (!_activeLockType || *_activeLockType != lockType) { - return; - } - - LOGV2(6531505, - "Released all serverless locks due to state collection drop", - "type"_attr = lockType); - - _activeLockType.reset(); - _activeOperations.clear(); -} - -void ServerlessOperationLockRegistry::clear() { - stdx::lock_guard lg(_mutex); - LOGV2(6531504, - "Clearing serverless operation lock registry on shutdown", - "ns"_attr = _activeLockType); - - _activeOperations.clear(); - _activeLockType.reset(); -} - -void ServerlessOperationLockRegistry::recoverLocks(OperationContext* opCtx) { - if (skipRecoverServerlessOperationLock.shouldFail()) { - return; - } - - auto& registry = ServerlessOperationLockRegistry::get(opCtx->getServiceContext()); - registry.clear(); - - PersistentTaskStore donorStore( - NamespaceString::kTenantMigrationDonorsNamespace); - donorStore.forEach(opCtx, {}, [&](const TenantMigrationDonorDocument& doc) { - // Do not acquire a lock for garbage-collectable documents - if (doc.getExpireAt()) { - return true; - } - - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, doc.getId()); - - return true; - }); - - PersistentTaskStore recipientStore( - NamespaceString::kTenantMigrationRecipientsNamespace); - recipientStore.forEach(opCtx, {}, [&](const TenantMigrationRecipientDocument& doc) { - // Do not acquire a lock for garbage-collectable documents - if (doc.getExpireAt()) { - return true; - } - - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, - doc.getId()); - - return true; - }); - - PersistentTaskStore splitStore( - NamespaceString::kShardSplitDonorsNamespace); - splitStore.forEach(opCtx, {}, [&](const ShardSplitDonorDocument& doc) { - // Do not acquire a lock for 
garbage-collectable documents - if (doc.getExpireAt()) { - return true; - } - - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, doc.getId()); - - return true; - }); -} - -const std::string kOperationLockFieldName = "operationLock"; -void ServerlessOperationLockRegistry::appendInfoForServerStatus(BSONObjBuilder* builder) const { - stdx::lock_guard lg(_mutex); - - if (!_activeLockType) { - builder->append(kOperationLockFieldName, 0); - return; - } - - switch (_activeLockType.value()) { - case ServerlessOperationLockRegistry::LockType::kShardSplit: - builder->append(kOperationLockFieldName, 1); - break; - case ServerlessOperationLockRegistry::LockType::kTenantDonor: - builder->append(kOperationLockFieldName, 2); - break; - case ServerlessOperationLockRegistry::LockType::kTenantRecipient: - builder->append(kOperationLockFieldName, 3); - break; - } -} - -boost::optional -ServerlessOperationLockRegistry::getActiveOperationType_forTest() { - stdx::lock_guard lg(_mutex); - - return _activeLockType; -} - - -} // namespace mongo diff --git a/src/mongo/db/serverless/serverless_operation_lock_registry.h b/src/mongo/db/serverless/serverless_operation_lock_registry.h deleted file mode 100644 index d9ac07393f4..00000000000 --- a/src/mongo/db/serverless/serverless_operation_lock_registry.h +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/service_context.h" -#include "mongo/platform/mutex.h" -#include "mongo/util/uuid.h" - -#include - -namespace mongo { - -/** - * Registry to allow only one type of active serverless operation at a time. It allows multiple - * simultaneous operations of the same type. - */ -class ServerlessOperationLockRegistry { - ServerlessOperationLockRegistry(const ServerlessOperationLockRegistry&) = delete; - ServerlessOperationLockRegistry& operator=(const ServerlessOperationLockRegistry&) = delete; - -public: - ServerlessOperationLockRegistry() = default; - - static const ServiceContext::Decoration get; - - enum LockType { kShardSplit, kTenantDonor, kTenantRecipient }; - - /** - * Acquire the serverless lock for LockType and adds operationId to the set of - * instances tracked. Throws ConflictingOperationInProgress error if there is already an - * activeServerlessOperation in progress with a different namespace than operationNamespace. 
- */ - void acquireLock(LockType lockType, const UUID& operationId); - - /** - * If _activeOpSeverlessOperation matches LockType, removes the given operationId from - * the set of active instances and releases the lock if the set becomes empty. Invariant if - * lockType or operationId does not own the lock. - */ - void releaseLock(LockType lockType, const UUID& operationId); - - /** - * Called when a state document collection is dropped. If the collection's lockType currently - * holds the lock, it releases the lock. If it does not own the lock, the function does nothing. - */ - void onDropStateCollection(LockType lockType); - - void clear(); - - /** - * Scan serverless state documents and acquire the serverless mutual exclusion lock if needed. - */ - static void recoverLocks(OperationContext* opCtx); - - /** - * Appends the exclusion status to the BSONObjBuilder. - */ - void appendInfoForServerStatus(BSONObjBuilder* builder) const; - - boost::optional getActiveOperationType_forTest(); - -private: - mutable Mutex _mutex = MONGO_MAKE_LATCH("ServerlessMutualExclusionRegistry::_mutex"); - boost::optional _activeLockType; - std::set _activeOperations; -}; - -} // namespace mongo diff --git a/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp b/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp deleted file mode 100644 index 9d95b3b7bc7..00000000000 --- a/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. 
- * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/serverless/serverless_operation_lock_registry.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" - -namespace mongo { - -TEST(ServerlessOperationLockRegistryTest, InsertRemoveOne) { - ServerlessOperationLockRegistry registry; - - auto id = UUID::gen(); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - registry.releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - - ASSERT_FALSE(registry.getActiveOperationType_forTest()); -} - -DEATH_TEST(ServerlessOperationLockRegistryTest, - InsertSameIdTwice, - "Cannot acquire the serverless lock twice for the same operationId.") { - ServerlessOperationLockRegistry registry; - - auto id = UUID::gen(); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); -} - -TEST(ServerlessOperationLockRegistryTest, AcquireDifferentNamespaceFail) { - ServerlessOperationLockRegistry 
registry; - - auto id = UUID::gen(); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - - ASSERT_THROWS_CODE( - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, UUID::gen()), - DBException, - ErrorCodes::ConflictingServerlessOperation); -} - -DEATH_TEST(ServerlessOperationLockRegistryTest, - ReleaseDifferentNsTriggersInvariant, - "Cannot release a serverless lock that is not owned by the given lock type.") { - ServerlessOperationLockRegistry registry; - - auto id = UUID::gen(); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - registry.releaseLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, id); -} - - -DEATH_TEST(ServerlessOperationLockRegistryTest, - ReleaseDifferentIdTriggersInvariant, - "Cannot release a serverless lock if the given operationId does not own the lock.") { - ServerlessOperationLockRegistry registry; - - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, UUID::gen()); - registry.releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, UUID::gen()); -} - -TEST(ServerlessOperationLockRegistryTest, ClearReleasesAllLocks) { - ServerlessOperationLockRegistry registry; - - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, UUID::gen()); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, UUID::gen()); - - registry.clear(); - - // Verify the lock has been released. 
- ASSERT_FALSE(registry.getActiveOperationType_forTest()); -} - -TEST(ServerlessOperationLockRegistryTest, LockIsReleasedWhenAllInstanceAreRemoved) { - ServerlessOperationLockRegistry registry; - - std::vector ids; - for (int i = 0; i < 5; ++i) { - ids.push_back(UUID::gen()); - } - - for (auto& id : ids) { - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - } - - // Verify the lock is held; - ASSERT_EQ(*registry.getActiveOperationType_forTest(), - ServerlessOperationLockRegistry::LockType::kShardSplit); - - - for (auto& id : ids) { - registry.releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - } - - // Verify the lock has been released. - ASSERT_FALSE(registry.getActiveOperationType_forTest()); -} - -TEST(ServerlessOperationLockRegistryTest, LockIsNotReleasedWhenNotAllInstanceAreRemoved) { - ServerlessOperationLockRegistry registry; - - std::vector ids; - for (int i = 0; i < 5; ++i) { - ids.push_back(UUID::gen()); - } - - for (auto& id : ids) { - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - } - // Add an additional id; - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, UUID::gen()); - - // Verify the lock is held; - ASSERT_EQ(*registry.getActiveOperationType_forTest(), - ServerlessOperationLockRegistry::LockType::kShardSplit); - - for (auto& id : ids) { - registry.releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, id); - } - - // Verify the lock is held; - ASSERT_EQ(*registry.getActiveOperationType_forTest(), - ServerlessOperationLockRegistry::LockType::kShardSplit); -} - - -} // namespace mongo diff --git a/src/mongo/db/serverless/shard_split_commands.cpp b/src/mongo/db/serverless/shard_split_commands.cpp index 5ce6c9c0307..b0f0e6c6cab 100644 --- a/src/mongo/db/serverless/shard_split_commands.cpp +++ b/src/mongo/db/serverless/shard_split_commands.cpp @@ -114,7 +114,7 @@ public: }; std::string help() const { - return "Start an 
operation to split a shard into its own slice."; + return "Start an opereation to split a shard into its own slice."; } bool adminOnly() const override { diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer.cpp b/src/mongo/db/serverless/shard_split_donor_op_observer.cpp index b2470d07854..2d67495c431 100644 --- a/src/mongo/db/serverless/shard_split_donor_op_observer.cpp +++ b/src/mongo/db/serverless/shard_split_donor_op_observer.cpp @@ -31,7 +31,6 @@ #include "mongo/db/catalog_raii.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" #include "mongo/db/serverless/shard_split_utils.h" @@ -49,8 +48,6 @@ bool isPrimary(const OperationContext* opCtx) { const auto tenantIdsToDeleteDecoration = OperationContext::declareDecoration>>(); -const auto shardSplitIdToDeleteDecoration = - OperationContext::declareDecoration>(); ShardSplitDonorDocument parseAndValidateDonorDocument(const BSONObj& doc) { auto donorStateDoc = ShardSplitDonorDocument::parse(IDLParserContext("donorStateDoc"), doc); @@ -149,9 +146,6 @@ void onTransitionToAbortingIndexBuilds(OperationContext* opCtx, invariant(donorStateDoc.getTenantIds()); invariant(donorStateDoc.getRecipientConnectionString()); - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, donorStateDoc.getId()); - auto tenantIds = *donorStateDoc.getTenantIds(); for (const auto& tenantId : tenantIds) { auto mtab = std::make_shared(opCtx->getServiceContext(), @@ -163,13 +157,11 @@ void onTransitionToAbortingIndexBuilds(OperationContext* opCtx, if (isPrimary(opCtx)) { // onRollback is not registered on secondaries since secondaries should not fail to // apply the write. 
- opCtx->recoveryUnit()->onRollback([opCtx, tenantIds, migrationId = donorStateDoc.getId()] { + opCtx->recoveryUnit()->onRollback([opCtx, tenantIds] { for (const auto& tenantId : tenantIds) { TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .remove(tenantId, TenantMigrationAccessBlocker::BlockerType::kDonor); } - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, migrationId); }); } } @@ -258,10 +250,6 @@ public: void commit(OperationContext* opCtx, boost::optional) override { if (_donorStateDoc.getExpireAt()) { - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, - _donorStateDoc.getId()); - if (_donorStateDoc.getTenantIds()) { auto tenantIds = _donorStateDoc.getTenantIds().value(); for (auto&& tenantId : tenantIds) { @@ -388,13 +376,12 @@ void ShardSplitDonorOpObserver::aboutToDelete(OperationContext* opCtx, } auto donorStateDoc = parseAndValidateDonorDocument(doc); - const bool shouldRemoveOnRecipient = - serverless::shouldRemoveStateDocumentOnRecipient(opCtx, donorStateDoc); uassert(ErrorCodes::IllegalOperation, str::stream() << "cannot delete a donor's state document " << doc << " since it has not been marked as garbage collectable and is not a" << " recipient garbage collectable.", - donorStateDoc.getExpireAt() || shouldRemoveOnRecipient); + donorStateDoc.getExpireAt() || + serverless::shouldRemoveStateDocumentOnRecipient(opCtx, donorStateDoc)); // To support back-to-back split retries, when a split is aborted, we remove its // TenantMigrationDonorAccessBlockers as soon as its donor state doc is marked as garbage @@ -410,10 +397,6 @@ void ShardSplitDonorOpObserver::aboutToDelete(OperationContext* opCtx, tenantIdsToDeleteDecoration(opCtx) = boost::make_optional(result); } - - if (shouldRemoveOnRecipient) { - shardSplitIdToDeleteDecoration(opCtx) = 
boost::make_optional(donorStateDoc.getId()); - } } void ShardSplitDonorOpObserver::onDelete(OperationContext* opCtx, @@ -436,12 +419,6 @@ void ShardSplitDonorOpObserver::onDelete(OperationContext* opCtx, for (auto&& tenantId : *tenantIdsToDeleteDecoration(opCtx)) { registry.remove(tenantId, TenantMigrationAccessBlocker::BlockerType::kDonor); } - - const auto idToDelete = shardSplitIdToDeleteDecoration(opCtx); - if (idToDelete) { - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, *idToDelete); - } }); } @@ -454,9 +431,6 @@ repl::OpTime ShardSplitDonorOpObserver::onDropCollection(OperationContext* opCtx opCtx->recoveryUnit()->onCommit([opCtx](boost::optional) { TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAll(TenantMigrationAccessBlocker::BlockerType::kDonor); - - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .onDropStateCollection(ServerlessOperationLockRegistry::LockType::kShardSplit); }); } diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer.h b/src/mongo/db/serverless/shard_split_donor_op_observer.h index fba3471c7dd..58b2a3cef1d 100644 --- a/src/mongo/db/serverless/shard_split_donor_op_observer.h +++ b/src/mongo/db/serverless/shard_split_donor_op_observer.h @@ -55,8 +55,7 @@ public: void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -215,6 +214,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp b/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp 
index f30ad593951..97c923524a3 100644 --- a/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp +++ b/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp @@ -34,7 +34,6 @@ #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" #include "mongo/db/serverless/shard_split_test_utils.h" @@ -447,13 +446,7 @@ TEST_F(ShardSplitDonorOpObserverTest, SetExpireAtForAbortedRemoveBlockers) { ASSERT_FALSE(mtab); }; - ServerlessOperationLockRegistry::get(_opCtx->getServiceContext()) - .acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, _uuid); - runUpdateTestCase(stateDocument, _tenantIds, mtabVerifier); - - ASSERT_FALSE(ServerlessOperationLockRegistry::get(_opCtx->getServiceContext()) - .getActiveOperationType_forTest()); } TEST_F(ShardSplitDonorOpObserverTest, DeleteAbortedDocumentDoesNotRemoveBlockers) { diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp index fa977ae0c7d..4c3fdabb39f 100644 --- a/src/mongo/db/serverless/shard_split_donor_service.cpp +++ b/src/mongo/db/serverless/shard_split_donor_service.cpp @@ -1005,18 +1005,7 @@ ExecutorFuture ShardSplitDonorService::DonorStateMachine::_updateS return repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); }) - .until([&](StatusWith swOpTime) { - if (swOpTime.getStatus().code() == ErrorCodes::ConflictingServerlessOperation) { - LOGV2(6531509, - "Shard split completed due to serverless lock error", - "id"_attr = _migrationId, - "status"_attr = swOpTime.getStatus()); - stdx::lock_guard lg(_mutex); - - uassertStatusOK(swOpTime); - } - return swOpTime.getStatus().isOK(); - }) + .until([](StatusWith 
swOpTime) { return swOpTime.getStatus().isOK(); }) .withBackoffBetweenIterations(kExponentialBackoff) .on(**executor, token); } @@ -1078,8 +1067,7 @@ ShardSplitDonorService::DonorStateMachine::_handleErrorOrEnterAbortedState( } } - if (ErrorCodes::isNotPrimaryError(status) || ErrorCodes::isShutdownError(status) || - status.code() == ErrorCodes::ConflictingServerlessOperation) { + if (ErrorCodes::isNotPrimaryError(status) || ErrorCodes::isShutdownError(status)) { // Don't abort the split on retriable errors that may have been generated by the local // server shutting/stepping down because it can be resumed when the client retries. return ExecutorFuture(**executor, StatusWith{status}); diff --git a/src/mongo/db/serverless/shard_split_donor_service_test.cpp b/src/mongo/db/serverless/shard_split_donor_service_test.cpp index f46c908059b..463031b66bc 100644 --- a/src/mongo/db/serverless/shard_split_donor_service_test.cpp +++ b/src/mongo/db/serverless/shard_split_donor_service_test.cpp @@ -49,7 +49,6 @@ #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_donor_service.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" @@ -512,11 +511,6 @@ TEST_F(ShardSplitDonorServiceTest, BasicShardSplitDonorServiceInstanceCreation) waitForReplSetStepUp(Status(ErrorCodes::OK, "")); waitForRecipientPrimaryMajorityWrite(); - // Verify the serverless lock has been acquired for split. 
- auto& registry = ServerlessOperationLockRegistry::get(opCtx->getServiceContext()); - ASSERT_EQ(*registry.getActiveOperationType_forTest(), - ServerlessOperationLockRegistry::LockType::kShardSplit); - auto result = serviceInstance->decisionFuture().get(); ASSERT_TRUE(hasActiveSplitForTenants(opCtx.get(), _tenantIds)); ASSERT(!result.abortReason); @@ -526,32 +520,10 @@ TEST_F(ShardSplitDonorServiceTest, BasicShardSplitDonorServiceInstanceCreation) auto completionFuture = serviceInstance->completionFuture(); completionFuture.wait(); - // The lock has been released. - ASSERT_FALSE(registry.getActiveOperationType_forTest()); - ASSERT_OK(serviceInstance->completionFuture().getNoThrow()); ASSERT_TRUE(serviceInstance->isGarbageCollectable()); } -TEST_F(ShardSplitDonorServiceTest, ShardSplitFailsWhenLockIsHeld) { - auto opCtx = makeOperationContext(); - test::shard_split::reconfigToAddRecipientNodes( - getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); - - auto& registry = ServerlessOperationLockRegistry::get(opCtx->getServiceContext()); - registry.acquireLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, UUID::gen()); - - // Create and start the instance. 
- auto serviceInstance = ShardSplitDonorService::DonorStateMachine::getOrCreate( - opCtx.get(), _service, defaultStateDocument().toBSON()); - ASSERT(serviceInstance.get()); - - auto decisionFuture = serviceInstance->decisionFuture(); - - auto result = decisionFuture.getNoThrow(); - ASSERT_EQ(result.getStatus().code(), ErrorCodes::ConflictingServerlessOperation); -} - TEST_F(ShardSplitDonorServiceTest, ReplSetStepUpRetryable) { auto opCtx = makeOperationContext(); test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); @@ -1043,10 +1015,6 @@ public: stateDocument.setState(ShardSplitDonorStateEnum::kBlocking); stateDocument.setRecipientConnectionString(ConnectionString::forLocal()); - ServerlessOperationLockRegistry::get(getServiceContext()) - .acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, - stateDocument.getId()); - return stateDocument; } }; diff --git a/src/mongo/db/session/internal_session_pool_test.cpp b/src/mongo/db/session/internal_session_pool_test.cpp index b0ae0c5c1df..6a1603922d2 100644 --- a/src/mongo/db/session/internal_session_pool_test.cpp +++ b/src/mongo/db/session/internal_session_pool_test.cpp @@ -52,10 +52,6 @@ public: _opCtx = makeOperationContext(); } - void tearDown() override { - serverGlobalParams.clusterRole = ClusterRole::None; - } - OperationContext* opCtx() const { return _opCtx.get(); } diff --git a/src/mongo/db/storage/checkpointer.cpp b/src/mongo/db/storage/checkpointer.cpp index 7eecdcfad61..4c4a2014a48 100644 --- a/src/mongo/db/storage/checkpointer.cpp +++ b/src/mongo/db/storage/checkpointer.cpp @@ -116,7 +116,7 @@ void Checkpointer::run() { const Date_t startTime = Date_t::now(); // TODO SERVER-50861: Access the storage engine via the ServiceContext. 
- _kvEngine->checkpoint(opCtx.get()); + _kvEngine->checkpoint(); const auto secondsElapsed = durationCount(Date_t::now() - startTime); if (secondsElapsed >= 30) { diff --git a/src/mongo/db/storage/column_store.h b/src/mongo/db/storage/column_store.h index 30b21cbe340..9db8ca7d1bb 100644 --- a/src/mongo/db/storage/column_store.h +++ b/src/mongo/db/storage/column_store.h @@ -786,6 +786,4 @@ struct SplitCellView { } } }; - -using PathCellSet = std::vector>; } // namespace mongo diff --git a/src/mongo/db/storage/control/journal_flusher.cpp b/src/mongo/db/storage/control/journal_flusher.cpp index 2811da189ba..05678350aec 100644 --- a/src/mongo/db/storage/control/journal_flusher.cpp +++ b/src/mongo/db/storage/control/journal_flusher.cpp @@ -233,6 +233,14 @@ void JournalFlusher::resume() { LOGV2(5142503, "Resumed journal flusher thread"); } +void JournalFlusher::triggerJournalFlush() { + stdx::lock_guard lk(_stateMutex); + if (!_flushJournalNow) { + _flushJournalNow = true; + _flushJournalNowCV.notify_one(); + } +} + void JournalFlusher::waitForJournalFlush() { while (true) { try { diff --git a/src/mongo/db/storage/control/journal_flusher.h b/src/mongo/db/storage/control/journal_flusher.h index d80d5035f3c..699a03d7e93 100644 --- a/src/mongo/db/storage/control/journal_flusher.h +++ b/src/mongo/db/storage/control/journal_flusher.h @@ -75,7 +75,8 @@ public: /** * Runs data flushes every 'storageGlobalParams.journalCommitIntervalMs' millis (unless - * '_disablePeriodicFlushes' is set) or immediately if waitForJournalFlush() is called. + * '_disablePeriodicFlushes' is set) or immediately if triggerJournalFlush() or + * waitForJournalFlush() is called. */ void run(); @@ -97,6 +98,11 @@ public: */ void resume(); + /** + * Signals an immediate journal flush and leaves. + */ + void triggerJournalFlush(); + /** * Signals an immediate journal flush and waits for it to complete before returning. 
* diff --git a/src/mongo/db/storage/control/storage_control.cpp b/src/mongo/db/storage/control/storage_control.cpp index def2887bebd..d47a4030afe 100644 --- a/src/mongo/db/storage/control/storage_control.cpp +++ b/src/mongo/db/storage/control/storage_control.cpp @@ -117,6 +117,10 @@ void stopStorageControls(ServiceContext* serviceContext, const Status& reason, b } } +void triggerJournalFlush(ServiceContext* serviceContext) { + JournalFlusher::get(serviceContext)->triggerJournalFlush(); +} + void waitForJournalFlush(OperationContext* opCtx) { JournalFlusher::get(opCtx)->waitForJournalFlush(); } diff --git a/src/mongo/db/storage/execution_context.h b/src/mongo/db/storage/execution_context.h index 4ceed7ec4a3..28479f833b9 100644 --- a/src/mongo/db/storage/execution_context.h +++ b/src/mongo/db/storage/execution_context.h @@ -31,7 +31,6 @@ #include "mongo/db/index/multikey_paths.h" #include "mongo/db/operation_context.h" -#include "mongo/db/storage/column_store.h" #include "mongo/db/storage/key_string.h" #include "mongo/util/auto_clear_ptr.h" @@ -62,15 +61,11 @@ public: AutoClearPtr multikeyPaths() { return makeAutoClearPtr(&_multikeyPaths); } - AutoClearPtr columnKeys() { - return makeAutoClearPtr(&_columnKeys); - } private: KeyStringSet _keys; KeyStringSet _multikeyMetadataKeys; MultikeyPaths _multikeyPaths; - PathCellSet _columnKeys; }; } // namespace mongo diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp index f078e946af2..5f371ee9b57 100644 --- a/src/mongo/db/storage/kv/durable_catalog_test.cpp +++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp @@ -235,7 +235,7 @@ protected: wuow.commit(); auto engine = operationContext()->getServiceContext()->getStorageEngine()->getEngine(); - engine->checkpoint(operationContext()); + engine->checkpoint(); storageMetadata = BSON(ident << unittest::assertGet(engine->getStorageMetadata(ident)) << idxIdent diff --git a/src/mongo/db/storage/kv/kv_engine.h 
b/src/mongo/db/storage/kv/kv_engine.h index 44e212172c0..2f26769228a 100644 --- a/src/mongo/db/storage/kv/kv_engine.h +++ b/src/mongo/db/storage/kv/kv_engine.h @@ -261,7 +261,7 @@ public: return false; } - virtual void checkpoint(OperationContext* opCtx) {} + virtual void checkpoint() {} /** * Returns true if the KVEngine is ephemeral -- that is, it is NOT persistent and all data is diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp index c04ab030e33..05b2776c6c8 100644 --- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp +++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp @@ -87,8 +87,11 @@ bool OplogCapMaintainerThread::_deleteExcessDocuments() { "Caught an InterruptedDueToStorageChange exception, " "but this thread can safely continue", "error"_attr = e.toStatus()); + } catch (const ExceptionFor&) { + // TODO (SERVER-69496): Remove the ErrorCodes::InterruptedAtShutdown catch block. + return false; } catch (const DBException& ex) { - if (!opCtx->checkForInterruptNoAssert().isOK()) { + if (opCtx->isKillPending()) { return false; } diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h index 2b6a033df3b..d3d6dbc5ee2 100644 --- a/src/mongo/db/storage/storage_engine.h +++ b/src/mongo/db/storage/storage_engine.h @@ -478,9 +478,8 @@ public: /** * Called when the checkpoint thread instructs the storage engine to take a checkpoint. The * underlying storage engine must take a checkpoint at this point. - * Acquires a resource mutex before taking the checkpoint. */ - virtual void checkpoint(OperationContext* opCtx) = 0; + virtual void checkpoint() = 0; /** * Recovers the storage engine state to the last stable timestamp. 
"Stable" in this case diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp index d82da37db16..fbc91ef3f7f 100644 --- a/src/mongo/db/storage/storage_engine_impl.cpp +++ b/src/mongo/db/storage/storage_engine_impl.cpp @@ -1234,8 +1234,8 @@ std::shared_ptr StorageEngineImpl::markIdentInUse(const std::string& iden return _dropPendingIdentReaper.markIdentInUse(ident); } -void StorageEngineImpl::checkpoint(OperationContext* opCtx) { - _engine->checkpoint(opCtx); +void StorageEngineImpl::checkpoint() { + _engine->checkpoint(); } void StorageEngineImpl::_onMinOfCheckpointAndOldestTimestampChanged(const Timestamp& timestamp) { diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h index b4a16898b28..55957685ddf 100644 --- a/src/mongo/db/storage/storage_engine_impl.h +++ b/src/mongo/db/storage/storage_engine_impl.h @@ -324,7 +324,7 @@ public: void startTimestampMonitor() override; - void checkpoint(OperationContext* opCtx) override; + void checkpoint() override; StatusWith reconcileCatalogAndIdents( OperationContext* opCtx, LastShutdownState lastShutdownState) override; diff --git a/src/mongo/db/storage/storage_engine_mock.h b/src/mongo/db/storage/storage_engine_mock.h index 81cb1eee960..37542e2ac8d 100644 --- a/src/mongo/db/storage/storage_engine_mock.h +++ b/src/mongo/db/storage/storage_engine_mock.h @@ -179,7 +179,7 @@ public: } void startTimestampMonitor() final {} - void checkpoint(OperationContext* opCtx) final {} + void checkpoint() final {} int64_t sizeOnDiskForDb(OperationContext* opCtx, const DatabaseName& dbName) final { return 0; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp index 25d4c623b14..1caf0b67158 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp @@ -175,6 +175,7 @@ void 
WiredTigerColumnStore::WriteCursor::insert(PathView path, RowId rid, CellVi auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx); metricsCollector.incrementOneIdxEntryWritten(c()->uri, keyItem.size); + // TODO: SERVER-65978, we may have to specially handle WT_DUPLICATE_KEY error here. if (ret) { uassertStatusOK(wtRCToStatus(ret, c()->session)); } @@ -218,6 +219,7 @@ void WiredTigerColumnStore::WriteCursor::update(PathView path, RowId rid, CellVi auto& metricsCollector = ResourceConsumption::MetricsCollector::get(_opCtx); metricsCollector.incrementOneIdxEntryWritten(c()->uri, keyItem.size); + // TODO: SERVER-65978, may want to handle WT_NOTFOUND specially. if (ret != 0) return uassertStatusOK(wtRCToStatus(ret, c()->session)); } @@ -446,11 +448,14 @@ Status WiredTigerColumnStore::compact(OperationContext* opCtx) { // TODO: SERVER-65980. uasserted(ErrorCodes::NotImplemented, "WiredTigerColumnStore::compact"); } - bool WiredTigerColumnStore::appendCustomStats(OperationContext* opCtx, BSONObjBuilder* output, double scale) const { - return WiredTigerUtil::appendCustomStats(opCtx, output, scale, _uri); + // TODO: SERVER-65980. + // For now we just skip this so that tests can successfully obtain collection-level stats on a + // collection with a columnstore index. 
+ output->append("note"_sd, "columnstore stats are not yet implemented"_sd); + return true; } } // namespace mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp index 70819b920ef..ce6f7bb0a84 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp @@ -375,7 +375,41 @@ void WiredTigerIndex::fullValidate(OperationContext* opCtx, bool WiredTigerIndex::appendCustomStats(OperationContext* opCtx, BSONObjBuilder* output, double scale) const { - return WiredTigerUtil::appendCustomStats(opCtx, output, scale, _uri); + dassert(opCtx->lockState()->isReadLocked()); + { + BSONObjBuilder metadata(output->subobjStart("metadata")); + Status status = WiredTigerUtil::getApplicationMetadata(opCtx, uri(), &metadata); + if (!status.isOK()) { + metadata.append("error", "unable to retrieve metadata"); + metadata.append("code", static_cast(status.code())); + metadata.append("reason", status.reason()); + } + } + std::string type, sourceURI; + WiredTigerUtil::fetchTypeAndSourceURI(opCtx, _uri, &type, &sourceURI); + StatusWith metadataResult = WiredTigerUtil::getMetadataCreate(opCtx, sourceURI); + StringData creationStringName("creationString"); + if (!metadataResult.isOK()) { + BSONObjBuilder creationString(output->subobjStart(creationStringName)); + creationString.append("error", "unable to retrieve creation config"); + creationString.append("code", static_cast(metadataResult.getStatus().code())); + creationString.append("reason", metadataResult.getStatus().reason()); + } else { + output->append(creationStringName, metadataResult.getValue()); + // Type can be "lsm" or "file" + output->append("type", type); + } + + WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(); + WT_SESSION* s = session->getSession(); + Status status = + WiredTigerUtil::exportTableToBSON(s, "statistics:" + uri(), "statistics=(fast)", output); + if 
(!status.isOK()) { + output->append("error", "unable to retrieve statistics"); + output->append("code", static_cast(status.code())); + output->append("reason", status.reason()); + } + return true; } Status WiredTigerIndex::dupKeyCheck(OperationContext* opCtx, const KeyString::Value& key) { @@ -502,9 +536,6 @@ Status WiredTigerIndex::compact(OperationContext* opCtx) { if (!cache->isEphemeral()) { WT_SESSION* s = WiredTigerRecoveryUnit::get(opCtx)->getSession()->getSession(); opCtx->recoveryUnit()->abandonSnapshot(); - // WT compact prompts WT to take checkpoints, so we need to take the checkpoint lock around - // WT compact calls. - Lock::ResourceLock checkpointLock{opCtx, ResourceId(RESOURCE_MUTEX, "checkpoint"), MODE_X}; int ret = s->compact(s, uri().c_str(), "timeout=0"); if (MONGO_unlikely(WTCompactIndexEBUSY.shouldFail())) { ret = EBUSY; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp index c1e7ba4d1c6..63eebef295f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp @@ -119,8 +119,7 @@ public: } } auto kv = - std::make_unique(opCtx, - getCanonicalName().toString(), + std::make_unique(getCanonicalName().toString(), params.dbpath, getGlobalServiceContext()->getFastClockSource(), wiredTigerGlobalOptions.engineConfig, diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp index c7c6ff620f2..005d4c6982f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp @@ -300,8 +300,7 @@ std::string toString(const StorageEngine::OldestActiveTransactionTimestampResult StringData WiredTigerKVEngine::kTableUriPrefix = "table:"_sd; -WiredTigerKVEngine::WiredTigerKVEngine(OperationContext* opCtx, - const std::string& canonicalName, +WiredTigerKVEngine::WiredTigerKVEngine(const std::string& 
canonicalName, const std::string& path, ClockSource* cs, const std::string& extraOpenOptions, @@ -538,7 +537,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(OperationContext* opCtx, if (repair && _hasUri(session.getSession(), _sizeStorerUri)) { LOGV2(22316, "Repairing size cache"); - auto status = _salvageIfNeeded(opCtx, _sizeStorerUri.c_str()); + auto status = _salvageIfNeeded(_sizeStorerUri.c_str()); if (status.code() != ErrorCodes::DataModifiedByRepair) fassertNoTrace(28577, status); } @@ -771,10 +770,10 @@ Status WiredTigerKVEngine::repairIdent(OperationContext* opCtx, StringData ident return Status::OK(); } _ensureIdentPath(ident); - return _salvageIfNeeded(opCtx, uri.c_str()); + return _salvageIfNeeded(uri.c_str()); } -Status WiredTigerKVEngine::_salvageIfNeeded(OperationContext* opCtx, const char* uri) { +Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) { // Using a side session to avoid transactional issues WiredTigerSession sessionWrapper(_conn); WT_SESSION* session = sessionWrapper.getSession(); @@ -784,7 +783,7 @@ Status WiredTigerKVEngine::_salvageIfNeeded(OperationContext* opCtx, const char* // operation it will attempt to clean up the dirty elements during checkpointing, thus allowing // the operation to succeed if it was the only reason to fail. if (rc == EBUSY) { - _checkpoint(opCtx, session); + _checkpoint(session); rc = (session->verify)(session, uri, nullptr); } @@ -798,14 +797,14 @@ Status WiredTigerKVEngine::_salvageIfNeeded(OperationContext* opCtx, const char* "Data file is missing. Attempting to drop and re-create the collection.", "uri"_attr = uri); - return _rebuildIdent(opCtx, session, uri); + return _rebuildIdent(session, uri); } LOGV2(22328, "Verify failed. Running a salvage operation.", "uri"_attr = uri); rc = session->salvage(session, uri, nullptr); // Same reasoning for handling EBUSY errors as above. 
if (rc == EBUSY) { - _checkpoint(opCtx, session); + _checkpoint(session); rc = session->salvage(session, uri, nullptr); } auto status = wtRCToStatus(rc, session, "Salvage failed:"); @@ -820,12 +819,10 @@ Status WiredTigerKVEngine::_salvageIfNeeded(OperationContext* opCtx, const char* "error"_attr = status); // If the data is unsalvageable, we should completely rebuild the ident. - return _rebuildIdent(opCtx, session, uri); + return _rebuildIdent(session, uri); } -Status WiredTigerKVEngine::_rebuildIdent(OperationContext* opCtx, - WT_SESSION* session, - const char* uri) { +Status WiredTigerKVEngine::_rebuildIdent(WT_SESSION* session, const char* uri) { invariant(_inRepairMode); invariant(std::string(uri).find(kTableUriPrefix.rawData()) == 0); @@ -866,7 +863,7 @@ Status WiredTigerKVEngine::_rebuildIdent(OperationContext* opCtx, // operation it will attempt to clean up the dirty elements during checkpointing, thus allowing // the operation to succeed if it was the only reason to fail. if (rc == EBUSY) { - _checkpoint(opCtx, session); + _checkpoint(session); rc = session->drop(session, uri, nullptr); } if (rc != 0) { @@ -1529,7 +1526,7 @@ Status WiredTigerKVEngine::recoverOrphanedIdent(OperationContext* opCtx, "error"_attr = status.reason()); // If the data is unsalvageable, we should completely rebuild the ident. - return _rebuildIdent(opCtx, session, _uri(ident).c_str()); + return _rebuildIdent(session, _uri(ident).c_str()); #endif } @@ -1789,13 +1786,11 @@ void WiredTigerKVEngine::alterIdentMetadata(OperationContext* opCtx, // concurrent operations. 
std::string alterString = WiredTigerIndex::generateAppMetadataString(*desc) + "exclusive_refreshed=false,"; - auto status = alterMetadata(opCtx, uri, alterString); + auto status = alterMetadata(uri, alterString); invariantStatusOK(status); } -Status WiredTigerKVEngine::alterMetadata(OperationContext* opCtx, - StringData uri, - StringData config) { +Status WiredTigerKVEngine::alterMetadata(StringData uri, StringData config) { // Use a dedicated session in an alter operation to avoid transaction issues. WiredTigerSession session(_conn); auto sessionPtr = session.getSession(); @@ -1809,7 +1804,7 @@ Status WiredTigerKVEngine::alterMetadata(OperationContext* opCtx, // operation it will attempt to clean up the dirty elements during checkpointing, thus allowing // the operation to succeed if it was the only reason to fail. if (ret == EBUSY) { - _checkpoint(opCtx, sessionPtr); + _checkpoint(sessionPtr); ret = sessionPtr->alter(sessionPtr, uriNullTerminated.c_str(), configNullTerminated.c_str()); } @@ -1892,7 +1887,7 @@ bool WiredTigerKVEngine::supportsDirectoryPerDB() const { return true; } -void WiredTigerKVEngine::_checkpoint(OperationContext* opCtx, WT_SESSION* session) { +void WiredTigerKVEngine::_checkpoint(WT_SESSION* session) { // Ephemeral WiredTiger instances cannot do a checkpoint to disk as there is no disk backing // the data. if (_ephemeral) { @@ -1933,8 +1928,6 @@ void WiredTigerKVEngine::_checkpoint(OperationContext* opCtx, WT_SESSION* sessio // Third, stableTimestamp >= initialDataTimestamp: Take stable checkpoint. Steady state // case. 
if (initialDataTimestamp.asULL() <= 1) { - Lock::ResourceLock checkpointLock{ - opCtx, ResourceId(RESOURCE_MUTEX, "checkpoint"), MODE_X}; clearIndividuallyCheckpointedIndexes(); invariantWTOK(session->checkpoint(session, "use_timestamp=false"), session); LOGV2_FOR_RECOVERY(5576602, @@ -1958,8 +1951,6 @@ void WiredTigerKVEngine::_checkpoint(OperationContext* opCtx, WT_SESSION* sessio "oplogNeededForRollback"_attr = toString(oplogNeededForRollback)); { - Lock::ResourceLock checkpointLock{ - opCtx, ResourceId(RESOURCE_MUTEX, "checkpoint"), MODE_X}; clearIndividuallyCheckpointedIndexes(); invariantWTOK(session->checkpoint(session, "use_timestamp=true"), session); } @@ -1976,10 +1967,10 @@ void WiredTigerKVEngine::_checkpoint(OperationContext* opCtx, WT_SESSION* sessio } } -void WiredTigerKVEngine::checkpoint(OperationContext* opCtx) { +void WiredTigerKVEngine::checkpoint() { UniqueWiredTigerSession session = _sessionCache->getSession(); WT_SESSION* s = session->getSession(); - return _checkpoint(opCtx, s); + return _checkpoint(s); } bool WiredTigerKVEngine::hasIdent(OperationContext* opCtx, StringData ident) const { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h index 705f0dc04cb..0706f2f9f01 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h @@ -100,8 +100,7 @@ class WiredTigerKVEngine final : public KVEngine { public: static StringData kTableUriPrefix; - WiredTigerKVEngine(OperationContext* opCtx, - const std::string& canonicalName, + WiredTigerKVEngine(const std::string& canonicalName, const std::string& path, ClockSource* cs, const std::string& extraOpenOptions, @@ -126,7 +125,7 @@ public: return !isEphemeral(); } - void checkpoint(OperationContext* opCtx) override; + void checkpoint() override; bool isEphemeral() const override { return _ephemeral; @@ -207,7 +206,7 @@ public: const IndexDescriptor* desc, bool 
isForceUpdateMetadata) override; - Status alterMetadata(OperationContext* opCtx, StringData uri, StringData config); + Status alterMetadata(StringData uri, StringData config); void flushAllFiles(OperationContext* opCtx, bool callerHoldsReadLock) override; @@ -423,7 +422,7 @@ private: StorageEngine::DropIdentCallback callback; }; - void _checkpoint(OperationContext* opCtx, WT_SESSION* session); + void _checkpoint(WT_SESSION* session); /** * Opens a connection on the WiredTiger database 'path' with the configuration 'wtOpenConfig'. @@ -434,7 +433,7 @@ private: */ void _openWiredTiger(const std::string& path, const std::string& wtOpenConfig); - Status _salvageIfNeeded(OperationContext* opCtx, const char* uri); + Status _salvageIfNeeded(const char* uri); void _ensureIdentPath(StringData ident); /** @@ -444,7 +443,7 @@ private: * Returns DataModifiedByRepair if the rebuild was successful, and any other error on failure. * This will never return Status::OK(). */ - Status _rebuildIdent(OperationContext* opCtx, WT_SESSION* session, const char* uri); + Status _rebuildIdent(WT_SESSION* session, const char* uri); bool _hasUri(WT_SESSION* session, const std::string& uri) const; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp index e411277a57d..d157629e1a8 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp @@ -63,7 +63,7 @@ namespace { class WiredTigerKVHarnessHelper : public KVHarnessHelper { public: WiredTigerKVHarnessHelper(ServiceContext* svcCtx, bool forRepair = false) - : _svcCtx(svcCtx), _dbpath("wt-kv-harness"), _forRepair(forRepair), _engine(makeEngine()) { + : _dbpath("wt-kv-harness"), _forRepair(forRepair), _engine(makeEngine()) { // Faitfhully simulate being in replica set mode for timestamping tests which requires // parity for journaling settings. 
repl::ReplSettings replSettings; @@ -94,9 +94,7 @@ private: // Use a small journal for testing to account for the unlikely event that the underlying // filesystem does not support fast allocation of a file of zeros. std::string extraStrings = "log=(file_max=1m,prealloc=false)"; - auto client = _svcCtx->makeClient("opCtx"); - return std::make_unique(client->makeOperationContext().get(), - kWiredTigerEngineName, + return std::make_unique(kWiredTigerEngineName, _dbpath.path(), _cs.get(), extraStrings, @@ -106,7 +104,6 @@ private: _forRepair); } - ServiceContext* _svcCtx; const std::unique_ptr _cs = std::make_unique(); unittest::TempDir _dbpath; bool _forRepair; @@ -548,7 +545,6 @@ TEST_F(WiredTigerKVEngineTest, TestReconfigureLog) { // Perform each test in their own limited scope in order to establish different // severity levels. { - auto opCtxRaii = _makeOperationContext(); // Set the WiredTiger Checkpoint LOGV2 component severity to the Log level. auto severityGuard = unittest::MinimumLoggedSeverityGuard{ logv2::LogComponent::kWiredTigerCheckpoint, logv2::LogSeverity::Log()}; @@ -558,7 +554,7 @@ TEST_F(WiredTigerKVEngineTest, TestReconfigureLog) { // Perform a checkpoint. The goal here is create some activity in WiredTiger in order // to generate verbose messages (we don't really care about the checkpoint itself). startCapturingLogMessages(); - _engine->checkpoint(opCtxRaii.get()); + _engine->checkpoint(); stopCapturingLogMessages(); // In this initial case, we don't expect to capture any debug checkpoint messages. The // base severity for the checkpoint component should be at Log(). @@ -573,7 +569,6 @@ TEST_F(WiredTigerKVEngineTest, TestReconfigureLog) { ASSERT_FALSE(foundWTCheckpointMessage); } { - auto opCtxRaii = _makeOperationContext(); // Set the WiredTiger Checkpoint LOGV2 component severity to the Debug(2) level. 
auto severityGuard = unittest::MinimumLoggedSeverityGuard{ logv2::LogComponent::kWiredTigerCheckpoint, logv2::LogSeverity::Debug(2)}; @@ -583,7 +578,7 @@ TEST_F(WiredTigerKVEngineTest, TestReconfigureLog) { // Perform another checkpoint. startCapturingLogMessages(); - _engine->checkpoint(opCtxRaii.get()); + _engine->checkpoint(); stopCapturingLogMessages(); // This time we expect to detect WiredTiger checkpoint Debug() messages. diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index 8bb44e80162..7dbc44d7845 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -1736,9 +1736,6 @@ Status WiredTigerRecordStore::doCompact(OperationContext* opCtx) { if (!cache->isEphemeral()) { WT_SESSION* s = WiredTigerRecoveryUnit::get(opCtx)->getSession()->getSession(); opCtx->recoveryUnit()->abandonSnapshot(); - // WT compact prompts WT to take checkpoints, so we need to take the checkpoint lock around - // WT compact calls. 
- Lock::ResourceLock checkpointLock{opCtx, ResourceId(RESOURCE_MUTEX, "checkpoint"), MODE_X}; int ret = s->compact(s, getURI().c_str(), "timeout=0"); if (MONGO_unlikely(WTCompactRecordStoreEBUSY.shouldFail())) { ret = EBUSY; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp index daf1fa0fe43..e9cdf6c25e0 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp @@ -43,8 +43,7 @@ std::string _testLoggingSettings(std::string extraStrings) { WiredTigerHarnessHelper::WiredTigerHarnessHelper(Options options, StringData extraStrings) : _dbpath("wt_test"), _lockerNoopClientObserverRegisterer(getServiceContext()), - _engine(Client::getCurrent()->makeOperationContext().get(), - kWiredTigerEngineName, + _engine(kWiredTigerEngineName, _dbpath.path(), &_cs, _testLoggingSettings(extraStrings.toString()), diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp index b0780726b3c..d6bf0e62721 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp @@ -52,8 +52,7 @@ class WiredTigerRecoveryUnitHarnessHelper final : public RecoveryUnitHarnessHelp public: WiredTigerRecoveryUnitHarnessHelper() : _dbpath("wt_test"), - _engine(Client::getCurrent()->makeOperationContext().get(), - kWiredTigerEngineName, // .canonicalName + _engine(kWiredTigerEngineName, // .canonicalName _dbpath.path(), // .path &_cs, // .cs "", // .extraOpenOptions diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp index c3cfaf7b478..22a30da3cb5 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp +++ 
b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp @@ -35,7 +35,6 @@ #include #include "mongo/base/error_codes.h" -#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/global_settings.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/storage/journal_listener.h" @@ -296,8 +295,6 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, auto config = syncType == Fsync::kCheckpointStableTimestamp ? "use_timestamp=true" : "use_timestamp=false"; { - Lock::ResourceLock checkpointLock{ - opCtx, ResourceId(RESOURCE_MUTEX, "checkpoint"), MODE_X}; _engine->clearIndividuallyCheckpointedIndexes(); invariantWTOK(s->checkpoint(s, config), s); } @@ -352,7 +349,6 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, _waitUntilDurableSession); LOGV2_DEBUG(22419, 4, "flushed journal"); } else { - Lock::ResourceLock checkpointLock{opCtx, ResourceId(RESOURCE_MUTEX, "checkpoint"), MODE_X}; _engine->clearIndividuallyCheckpointedIndexes(); invariantWTOK(_waitUntilDurableSession->checkpoint(_waitUntilDurableSession, nullptr), _waitUntilDurableSession); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp index 96974706972..ab4623f5441 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp @@ -96,8 +96,7 @@ void removeTableChecksFile() { } } -void setTableWriteTimestampAssertion(OperationContext* opCtx, - WiredTigerSessionCache* sessionCache, +void setTableWriteTimestampAssertion(WiredTigerSessionCache* sessionCache, const std::string& uri, bool on) { const std::string setting = on ? 
"assert=(write_timestamp=on)" : "assert=(write_timestamp=off)"; @@ -106,7 +105,7 @@ void setTableWriteTimestampAssertion(OperationContext* opCtx, "Changing table write timestamp assertion settings", "uri"_attr = uri, "writeTimestampAssertionOn"_attr = on); - auto status = sessionCache->getKVEngine()->alterMetadata(opCtx, uri, setting); + auto status = sessionCache->getKVEngine()->alterMetadata(uri, setting); if (!status.isOK()) { // Dump the storage engine's internal state to assist in diagnosis. sessionCache->getKVEngine()->dump(); @@ -562,48 +561,6 @@ size_t WiredTigerUtil::getCacheSizeMB(double requestedCacheSizeGB) { return static_cast(cacheSizeMB); } -bool WiredTigerUtil::appendCustomStats(OperationContext* opCtx, - BSONObjBuilder* output, - double scale, - const std::string& uri) { - dassert(opCtx->lockState()->isReadLocked()); - { - BSONObjBuilder metadata(output->subobjStart("metadata")); - Status status = WiredTigerUtil::getApplicationMetadata(opCtx, uri, &metadata); - if (!status.isOK()) { - metadata.append("error", "unable to retrieve metadata"); - metadata.append("code", static_cast(status.code())); - metadata.append("reason", status.reason()); - } - } - std::string type, sourceURI; - WiredTigerUtil::fetchTypeAndSourceURI(opCtx, uri, &type, &sourceURI); - StatusWith metadataResult = WiredTigerUtil::getMetadataCreate(opCtx, sourceURI); - StringData creationStringName("creationString"); - if (!metadataResult.isOK()) { - BSONObjBuilder creationString(output->subobjStart(creationStringName)); - creationString.append("error", "unable to retrieve creation config"); - creationString.append("code", static_cast(metadataResult.getStatus().code())); - creationString.append("reason", metadataResult.getStatus().reason()); - } else { - output->append(creationStringName, metadataResult.getValue()); - // Type can be "lsm" or "file" - output->append("type", type); - } - - WiredTigerSession* session = WiredTigerRecoveryUnit::get(opCtx)->getSession(); - WT_SESSION* s = 
session->getSession(); - Status status = - WiredTigerUtil::exportTableToBSON(s, "statistics:" + uri, "statistics=(fast)", output); - if (!status.isOK()) { - output->append("error", "unable to retrieve statistics"); - output->append("code", static_cast(status.code())); - output->append("reason", status.reason()); - } - return true; -} - - logv2::LogSeverity getWTLOGV2SeverityLevel(const BSONObj& obj) { const std::string field = "verbose_level_id"; @@ -950,7 +907,7 @@ Status WiredTigerUtil::setTableLogging(OperationContext* opCtx, const std::strin 22432, 1, "Changing table logging settings", "uri"_attr = uri, "loggingEnabled"_attr = on); // Only alter the metadata once we're sure that we need to change the table settings, since // WT_SESSION::alter may return EBUSY and require taking a checkpoint to make progress. - auto status = sessionCache->getKVEngine()->alterMetadata(opCtx, uri, setting); + auto status = sessionCache->getKVEngine()->alterMetadata(uri, setting); if (!status.isOK()) { // Dump the storage engine's internal state to assist in diagnosis. sessionCache->getKVEngine()->dump(); @@ -967,10 +924,10 @@ Status WiredTigerUtil::setTableLogging(OperationContext* opCtx, const std::strin // The write timestamp assertion setting only needs to be changed at startup. It will be turned // on when logging is disabled, and off when logging is enabled. if (TestingProctor::instance().isEnabled()) { - setTableWriteTimestampAssertion(opCtx, sessionCache, uri, !on); + setTableWriteTimestampAssertion(sessionCache, uri, !on); } else { // Disables the assertion when the testing proctor is off. 
- setTableWriteTimestampAssertion(opCtx, sessionCache, uri, false /* on */); + setTableWriteTimestampAssertion(sessionCache, uri, false /* on */); } return Status::OK(); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h index a042ffd0681..b17dfcb6f85 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h @@ -276,11 +276,6 @@ public: static int64_t getIdentSize(WT_SESSION* s, const std::string& uri); - static bool appendCustomStats(OperationContext* opCtx, - BSONObjBuilder* output, - double scale, - const std::string& uri); - /** * Returns the bytes available for reuse for an ident. This is the amount of allocated space on * disk that is not storing any data. diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translate/optimize_pipeline_tests.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translate/optimize_pipeline_tests.txt index dd776003d63..9f03e9a01af 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translate/optimize_pipeline_tests.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translate/optimize_pipeline_tests.txt @@ -96,18 +96,13 @@ Root [] | | scan_0 | RefBlock: | Variable [scan_0] -Sargable [Complete] -| | | | | requirementsMap: -| | | | | refProjection: scan_0, path: 'PathGet [a] PathTraverse [1] PathIdentity []', intervals: {{{[Const [1], Const [1]]}} U {{[Const [2], Const [2]]}} U {{[Const [3], Const [3]]}}} -| | | | candidateIndexes: -| | | | candidateId: 1, index1, {}, {0}, {{{[Const [1], Const [1]]}} U {{[Const [2], Const [2]]}} U {{[Const [3], Const [3]]}}} -| | | scanParams: -| | | {'a': evalTemp_0} -| | | residualReqs: -| | | refProjection: evalTemp_0, path: 'PathTraverse [1] PathIdentity []', intervals: {{{[Const [1], Const [1]]}} U {{[Const [2], Const [2]]}} U {{[Const [3], Const [3]]}}}, entryIndex: 0 -| | BindBlock: -| RefBlock: -| Variable [scan_0] +Filter [] +| EvalFilter [] +| | Variable 
[scan_0] +| PathGet [a] +| PathTraverse [1] +| PathCompare [EqMember] +| Const [[1, 2, 3]] Scan [collection] BindBlock: [scan_0] diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt index 8d7a52033c4..f7f248ddb74 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt @@ -1,48 +1,48 @@ ==== VARIATION: sbe, query={}, sort={}, proj={} -YW4ABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXg== +YW4ABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={$or: [{a: 1}, {b: 2}]}, sort={}, proj={} -b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAABubm5uBQAAAABmXg== +b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={b: 1}, sort={}, proj={} -ZXEAYj8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYj8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1, b: 1, c: 1}, sort={}, proj={} -YW4AW2VxAGE/AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4= +YW4AW2VxAGE/AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={}, sort={a: 1}, proj={} -YW4ABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +YW4ABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={}, sort={a: -1}, proj={} -YW4ABQAAAAB+ZGEAAAAAAAAAAABubm5uBQAAAABmXg== +YW4ABQAAAAB+ZGEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={} -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={a: 1} -ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAbm5ubgUAAAAAZl4= 
+ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={}, sort={a: 1}, proj={a: 1} -YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAbm5ubgUAAAAAZl4= +YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={}, sort={a: 1}, proj={a: 1} -YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAbm5ubgUAAAAAZl4= +YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={}, sort={}, proj={a: 1} -YW4ADAAAABBhAAEAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4= +YW4ADAAAABBhAAEAAAAAAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={}, sort={}, proj={a: true} -YW4ACQAAAAhhAAEAAAAAAAAAAAAAbm5ubgUAAAAAZl4= +YW4ACQAAAAhhAAEAAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={}, sort={}, proj={a: false} -YW4ACQAAAAhhAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4= +YW4ACQAAAAhhAAAAAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=1, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAB0bm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAHRubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABmbm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAGZubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=1, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABudG5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG50bm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubmZuBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5uZm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 
-ZXEAYT8AAAAABQAAAAB+YWEACgAAAAAAAAAAAAAAbm5ubgUAAAAAZl4= +ZXEAYT8AAAAABQAAAAB+YWEKAAAAAAAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAoAAAAAAAAAbm5ubgUAAAAAZl4= +ZXEAYT8AAAAABQAAAAB+YWEAAAAACgAAAAAAAABubm5uBQAAAABmXg== ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=1 -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubnRuGAAAABIkcmVjb3JkSWQAAQAAAAAAAAAAZl4= +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5udG4YAAAAEiRyZWNvcmRJZAABAAAAAAAAAABmXg== diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt index 532045bed16..15084d41d0e 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt @@ -1,12 +1,12 @@ ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXloAAAADJGxvb2t1cABMAAAAAmZyb20ADAAAAGZvcmVpZ25jb2xsAAJhcwADAAAAYXMAAmxvY2FsRmllbGQAAgAAAGEAAmZvcmVpZ25GaWVsZAACAAAAYgAAAA== 
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWgAAAAMkbG9va3VwAEwAAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAA ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXlsAAAADJGxvb2t1cABNAAAAAmZyb20ADAAAAGZvcmVpZ25jb2xsAAJhcwADAAAAYXMAAmxvY2FsRmllbGQAAwAAAGExAAJmb3JlaWduRmllbGQAAgAAAGIAAAA= +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWwAAAAMkbG9va3VwAE0AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAADAAAAYTEAAmZvcmVpZ25GaWVsZAACAAAAYgAAAA== ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXlsAAAADJGxvb2t1cABNAAAAAmZyb20ADAAAAGZvcmVpZ25jb2xsAAJhcwADAAAAYXMAAmxvY2FsRmllbGQAAgAAAGEAAmZvcmVpZ25GaWVsZAADAAAAYjEAAAA= +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWwAAAAMkbG9va3VwAE0AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAMAAABiMQAAAA== ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXlsAAAADJGxvb2t1cABNAAAAAmZyb20ADAAAAGZvcmVpZ25jb2xsAAJhcwAEAAAAYXMxAAJsb2NhbEZpZWxkAAIAAABhAAJmb3JlaWduRmllbGQAAgAAAGIAAAA= +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWwAAAAMkbG9va3VwAE0AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAQAAABhczEAAmxvY2FsRmllbGQAAgAAAGEAAmZvcmVpZ25GaWVsZAACAAAAYgAAAA== ==== VARIATION: sbe, query={a: 1}, sort={}, proj={} -ZXEAYT8AAAAABQAAAAAAAAAAAAAAAABubm5uBQAAAABmXloAAAADJGxvb2t1cABMAAAAAmZyb20ADAAAAGZvcmVpZ25jb2xsAAJhcwADAAAAYXMAAmxvY2FsRmllbGQAAgAAAGEAAmZvcmVpZ25GaWVsZAACAAAAYgAAAF0AAAADJGxvb2t1cABPAAAAAmZyb20ADAAAAGZvcmVpZ25jb2xsAAJhcwAEAAAAYXMxAAJsb2NhbEZpZWxkAAMAAABhMQACZm9yZWlnbkZpZWxkAAMAAABiMQAAAA== +ZXEAYT8AAAAABQAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWgAAAAMkbG9va3VwAEwAAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAAXQAAAAMkbG9va3VwAE8AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAQAAABhczEAAmxvY2FsRmllbGQAAwAAAGExAAJmb3JlaWduRmllbGQAAwAAAGIxAAAA diff --git 
a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt index 8177ce2bbc8..5e494c83f72 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt @@ -1,6 +1,6 @@ ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAABmXg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAGZe ==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0 -ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAABubm5uBQAAAAB0Xg== +ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAG5ubm4FAAAAAHRe diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp index 2b81449e5ed..691a7da6365 100644 --- a/src/mongo/db/transaction/transaction_participant.cpp +++ b/src/mongo/db/transaction/transaction_participant.cpp @@ -1681,7 +1681,11 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( auto opObserver = opCtx->getServiceContext()->getOpObserver(); const auto wallClockTime = opCtx->getServiceContext()->getFastClockSource()->now(); auto applyOpsOplogSlotAndOperationAssignment = opObserver->preTransactionPrepare( - opCtx, reservedSlots, wallClockTime, completedTransactionOperations); + opCtx, + reservedSlots, + p().transactionOperations.getNumberOfPrePostImagesToWrite(), + wallClockTime, + completedTransactionOperations); opCtx->recoveryUnit()->setPrepareTimestamp(prepareOplogSlot.getTimestamp()); 
opCtx->getWriteUnitOfWork()->prepare(); diff --git a/src/mongo/db/transaction/transaction_participant_test.cpp b/src/mongo/db/transaction/transaction_participant_test.cpp index b6b5012ab6f..341334fcc4a 100644 --- a/src/mongo/db/transaction/transaction_participant_test.cpp +++ b/src/mongo/db/transaction/transaction_participant_test.cpp @@ -102,6 +102,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) override; @@ -156,6 +157,7 @@ public: std::unique_ptr OpObserverMock::preTransactionPrepare(OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) { return std::make_unique( diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp index 6e4409015d2..aaa25f02cc0 100644 --- a/src/mongo/db/ttl.cpp +++ b/src/mongo/db/ttl.cpp @@ -495,7 +495,7 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx, "error"_attr = ex); return false; } catch (const DBException& ex) { - if (!opCtx->checkForInterruptNoAssert().isOK()) { + if (opCtx->isKillPending()) { // The exception is relevant to the entire TTL monitoring process, not just the specific // TTL index. Let the exception escape so it can be addressed at the higher monitoring // layer. 
diff --git a/src/mongo/db/vector_clock_mongod_test.cpp b/src/mongo/db/vector_clock_mongod_test.cpp index 43c4e4d8b06..59ccf7b2ca5 100644 --- a/src/mongo/db/vector_clock_mongod_test.cpp +++ b/src/mongo/db/vector_clock_mongod_test.cpp @@ -37,7 +37,6 @@ #include "mongo/db/op_observer/oplog_writer_mock.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" -#include "mongo/db/server_options.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/unittest/death_test.h" #include "mongo/util/clock_source_mock.h" @@ -68,9 +67,6 @@ protected: auto validator = std::make_unique(_keyManager); validator->init(getServiceContext()); LogicalTimeValidator::set(getServiceContext(), std::move(validator)); - - // Ensure that this node is neither "config server" nor "shard server". - serverGlobalParams.clusterRole = ClusterRole::None; } void tearDown() override { diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp index 837828ce218..140af5331a4 100644 --- a/src/mongo/db/views/view_catalog_test.cpp +++ b/src/mongo/db/views/view_catalog_test.cpp @@ -759,7 +759,7 @@ public: }; TEST_F(ServerlessViewCatalogFixture, LookupExistingViewBeforeAndAfterDropFeatureFlagOff) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const NamespaceString viewName(db()->name(), "view"); const NamespaceString viewOn(db()->name(), "coll"); @@ -771,7 +771,7 @@ TEST_F(ServerlessViewCatalogFixture, LookupExistingViewBeforeAndAfterDropFeature } TEST_F(ServerlessViewCatalogFixture, LookupExistingViewBeforeAndAfterDropFeatureFlagOn) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest 
featureFlagController("featureFlagRequireTenantID", true); const NamespaceString viewName(db()->name(), "view"); const NamespaceString viewOn(db()->name(), "coll"); @@ -784,7 +784,7 @@ TEST_F(ServerlessViewCatalogFixture, LookupExistingViewBeforeAndAfterDropFeature } TEST_F(ServerlessViewCatalogFixture, ModifyViewBelongingToTenantFeatureFlagOff) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); const NamespaceString viewName(db()->name(), "db1.view"); const NamespaceString viewOn(db()->name(), "db2.coll"); @@ -799,7 +799,7 @@ TEST_F(ServerlessViewCatalogFixture, ModifyViewBelongingToTenantFeatureFlagOff) } TEST_F(ServerlessViewCatalogFixture, ModifyViewBelongingToTenantFeatureFlagOn) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); const NamespaceString viewName(db()->name(), "db1.view"); const NamespaceString viewOn(db()->name(), "db2.coll"); diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp index fc9b1f82f7e..393a3f75853 100644 --- a/src/mongo/dbtests/indexupdatetests.cpp +++ b/src/mongo/dbtests/indexupdatetests.cpp @@ -102,7 +102,7 @@ protected: wunit.commit(); abortOnExit.dismiss(); } catch (const DBException&) { - if (!_opCtx->checkForInterruptNoAssert().isOK()) + if (_opCtx->isKillPending()) return true; throw; diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp index 484c479da7d..b9228513cf6 100644 --- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp +++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp @@ -435,7 +435,7 @@ TEST(MockDBClientConnTest, SetCmdReply) { { MockDBClientConnection conn(&server); 
BSONObj response; - ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("local", response["host"].str()); @@ -446,7 +446,7 @@ TEST(MockDBClientConnTest, SetCmdReply) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("local", response["host"].str()); @@ -456,7 +456,7 @@ TEST(MockDBClientConnTest, SetCmdReply) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("local", response["host"].str()); @@ -481,7 +481,7 @@ TEST(MockDBClientConnTest, CyclingCmd) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("foo", BSON("isMaster" << 1), response)); + ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("a", response["set"].str()); ASSERT(response["isMaster"].trueValue()); @@ -492,7 +492,7 @@ TEST(MockDBClientConnTest, CyclingCmd) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("foo", BSON("isMaster" << 1), response)); + ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("a", response["set"].str()); ASSERT(!response["isMaster"].trueValue()); @@ -503,7 +503,7 @@ TEST(MockDBClientConnTest, CyclingCmd) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("foo", BSON("isMaster" << 1), response)); + ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << 1), 
response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("a", response["set"].str()); ASSERT(response["isMaster"].trueValue()); @@ -520,7 +520,7 @@ TEST(MockDBClientConnTest, MultipleStoredResponse) { MockDBClientConnection conn(&server); { BSONObj response; - ASSERT(conn.runCommand("foo", + ASSERT(conn.runCommand("foo.baz", BSON("isMaster" << "abc"), response)); @@ -529,7 +529,7 @@ TEST(MockDBClientConnTest, MultipleStoredResponse) { { BSONObj response; - ASSERT(!conn.runCommand("a", BSON("serverStatus" << 1), response)); + ASSERT(!conn.runCommand("a.b", BSON("serverStatus" << 1), response)); } } @@ -542,14 +542,14 @@ TEST(MockDBClientConnTest, CmdCount) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("foo", BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand("foo.bar", BSON("serverStatus" << 1), response)); ASSERT_EQUALS(1U, server.getCmdCount()); } { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand("baz", BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand("baz.bar", BSON("serverStatus" << 1), response)); ASSERT_EQUALS(2U, server.getCmdCount()); } } @@ -572,7 +572,7 @@ TEST(MockDBClientConnTest, Shutdown) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT_THROWS(conn.runCommand("test", BSON("serverStatus" << 1), response), + ASSERT_THROWS(conn.runCommand("test.user", BSON("serverStatus" << 1), response), mongo::NetworkException); } @@ -590,7 +590,7 @@ TEST(MockDBClientConnTest, Restart) { // new instance still has it conn1.find(FindCommandRequest(NamespaceString("test.user"))); BSONObj response; - conn1.runCommand("test", BSON("serverStatus" << 1), response); + conn1.runCommand("test.user", BSON("serverStatus" << 1), response); server.shutdown(); ASSERT_THROWS(conn1.find(FindCommandRequest(NamespaceString("test.user"))), @@ -629,7 +629,7 @@ TEST(MockDBClientConnTest, ClearCounter) { MockDBClientConnection conn(&server); 
conn.find(FindCommandRequest(FindCommandRequest(NamespaceString("test.user")))); BSONObj response; - conn.runCommand("test", BSON("serverStatus" << 1), response); + conn.runCommand("test.user", BSON("serverStatus" << 1), response); server.clearCounters(); ASSERT_EQUALS(0U, server.getQueryCount()); @@ -656,7 +656,7 @@ TEST(MockDBClientConnTest, Delay) { { mongo::Timer timer; BSONObj response; - conn.runCommand("x", BSON("serverStatus" << 1), response); + conn.runCommand("x.x", BSON("serverStatus" << 1), response); const int nowInMilliSec = timer.millis(); ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130); } diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp index f82063033c4..e3461193747 100644 --- a/src/mongo/dbtests/mock_replica_set_test.cpp +++ b/src/mongo/dbtests/mock_replica_set_test.cpp @@ -80,7 +80,8 @@ TEST(MockReplicaSetTest, IsMasterNode0) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n0:27017"); - bool ok = MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse); + bool ok = + MockDBClientConnection(node).runCommand("foo.bar", BSON("ismaster" << 1), cmdResponse); ASSERT(ok); ASSERT(cmdResponse["ismaster"].trueValue()); @@ -107,7 +108,8 @@ TEST(MockReplicaSetTest, IsMasterNode1) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n1:27017"); - bool ok = MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse); + bool ok = + MockDBClientConnection(node).runCommand("foo.bar", BSON("ismaster" << 1), cmdResponse); ASSERT(ok); ASSERT(!cmdResponse["ismaster"].trueValue()); @@ -134,7 +136,8 @@ TEST(MockReplicaSetTest, IsMasterNode2) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n2:27017"); - bool ok = MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse); + bool ok = + MockDBClientConnection(node).runCommand("foo.bar", BSON("ismaster" << 1), cmdResponse); ASSERT(ok); 
ASSERT(!cmdResponse["ismaster"].trueValue()); @@ -161,8 +164,8 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode0) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n0:27017"); - bool ok = - MockDBClientConnection(node).runCommand("foo", BSON("replSetGetStatus" << 1), cmdResponse); + bool ok = MockDBClientConnection(node).runCommand( + "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -194,8 +197,8 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode1) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n1:27017"); - bool ok = - MockDBClientConnection(node).runCommand("foo", BSON("replSetGetStatus" << 1), cmdResponse); + bool ok = MockDBClientConnection(node).runCommand( + "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -229,8 +232,8 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode2) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n2:27017"); - bool ok = - MockDBClientConnection(node).runCommand("foo", BSON("replSetGetStatus" << 1), cmdResponse); + bool ok = MockDBClientConnection(node).runCommand( + "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -300,7 +303,7 @@ TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n0:27017"); bool ok = - MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse); + MockDBClientConnection(node).runCommand("foo.bar", BSON("ismaster" << 1), cmdResponse); ASSERT(ok); ASSERT(cmdResponse["ismaster"].trueValue()); @@ -328,7 +331,7 @@ TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode(hostToRemove); bool ok = - MockDBClientConnection(node).runCommand("foo", BSON("ismaster" << 1), cmdResponse); + 
MockDBClientConnection(node).runCommand("foo.bar", BSON("ismaster" << 1), cmdResponse); ASSERT(ok); ASSERT(!cmdResponse["ismaster"].trueValue()); @@ -351,7 +354,7 @@ TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n2:27017"); bool ok = MockDBClientConnection(node).runCommand( - "foo", BSON("replSetGetStatus" << 1), cmdResponse); + "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -384,7 +387,7 @@ TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode(hostToRemove); bool ok = MockDBClientConnection(node).runCommand( - "foo", BSON("replSetGetStatus" << 1), cmdResponse); + "foo.bar", BSON("replSetGetStatus" << 1), cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); diff --git a/src/mongo/idl/SConscript b/src/mongo/idl/SConscript index e892a326d39..489dcc9ef56 100644 --- a/src/mongo/idl/SConscript +++ b/src/mongo/idl/SConscript @@ -114,7 +114,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/server_feature_flags', '$BUILD_DIR/mongo/db/server_options_core', '$BUILD_DIR/mongo/db/service_context', - '$BUILD_DIR/mongo/rpc/message', '$BUILD_DIR/mongo/util/cmdline_utils/cmdline_utils', '$BUILD_DIR/mongo/util/options_parser/options_parser', 'cluster_server_parameter', diff --git a/src/mongo/idl/cluster_server_parameter_op_observer.h b/src/mongo/idl/cluster_server_parameter_op_observer.h index 0de4a831616..54d7b6aace4 100644 --- a/src/mongo/idl/cluster_server_parameter_op_observer.h +++ b/src/mongo/idl/cluster_server_parameter_op_observer.h @@ -108,8 +108,7 @@ public: BSONObj indexDoc) final {} void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; + const UUID& globalIndexUUID) final{}; void onCreateIndex(OperationContext* opCtx, const NamespaceString& 
nss, @@ -221,6 +220,7 @@ public: std::unique_ptr preTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, + size_t numberOfPrePostImagesToWrite, Date_t wallClockTime, std::vector* statements) final { return nullptr; diff --git a/src/mongo/rpc/SConscript b/src/mongo/rpc/SConscript index 4ec2445e872..4d88e49c8b0 100644 --- a/src/mongo/rpc/SConscript +++ b/src/mongo/rpc/SConscript @@ -123,7 +123,6 @@ env.Library( '$BUILD_DIR/mongo/db/signed_logical_time', '$BUILD_DIR/mongo/db/write_block_bypass', 'client_metadata', - 'message', 'metadata_impersonated_user', ], LIBDEPS_PRIVATE=[ diff --git a/src/mongo/rpc/op_msg.cpp b/src/mongo/rpc/op_msg.cpp index e6213828ed4..be420d55e50 100644 --- a/src/mongo/rpc/op_msg.cpp +++ b/src/mongo/rpc/op_msg.cpp @@ -251,65 +251,6 @@ OpMsg OpMsg::parse(const Message& message, Client* client) try { throw; } -OpMsgRequest OpMsgRequest::fromDBAndBody(StringData db, BSONObj body, const BSONObj& extraFields) { - return OpMsgRequestBuilder::create(db, std::move(body), extraFields); -} - -boost::optional parseDollarTenant(const BSONObj body) { - auto tenant = body.getField("$tenant"); - if (tenant) { - return TenantId::parseFromBSON(body.getField("$tenant")); - } else { - return boost::none; - } -} - -void appendDollarTenant(BSONObjBuilder& builder, - const TenantId& tenant, - boost::optional originalTenant = boost::none) { - if (originalTenant) { - massert(8423373, - str::stream() << "Unable to set TenantId '" << tenant - << "' on OpMsgRequest as it already has " - << originalTenant->toString(), - tenant == originalTenant.value()); - } else { - tenant.serializeToBSON("$tenant", &builder); - } -} - -void OpMsgRequest::setDollarTenant(const TenantId& tenant) { - massert(8423372, - str::stream() << "Should not set dollar tenant " << tenant - << " on the validated OpMsgRequest.", - !validatedTenancyScope); - - auto dollarTenant = parseDollarTenant(body); - BSONObjBuilder bodyBuilder(std::move(body)); - 
appendDollarTenant(bodyBuilder, tenant, dollarTenant); - body = bodyBuilder.obj(); -} - -OpMsgRequest OpMsgRequestBuilder::create(StringData db, BSONObj body, const BSONObj& extraFields) { - return create({boost::none, db}, std::move(body), extraFields); -} - -OpMsgRequest OpMsgRequestBuilder::create(const DatabaseName& dbName, - BSONObj body, - const BSONObj& extraFields) { - auto dollarTenant = parseDollarTenant(body); - BSONObjBuilder bodyBuilder(std::move(body)); - bodyBuilder.appendElements(extraFields); - bodyBuilder.append("$db", dbName.db()); - if (dbName.tenantId()) { - appendDollarTenant(bodyBuilder, dbName.tenantId().value(), dollarTenant); - } - - OpMsgRequest request; - request.body = bodyBuilder.obj(); - return request; -} - namespace { void serializeHelper(const std::vector& sequences, const BSONObj& body, diff --git a/src/mongo/rpc/op_msg.h b/src/mongo/rpc/op_msg.h index a1333446265..243dbf9a344 100644 --- a/src/mongo/rpc/op_msg.h +++ b/src/mongo/rpc/op_msg.h @@ -188,7 +188,18 @@ struct OpMsgRequest : public OpMsg { return OpMsgRequest(OpMsg::parseOwned(message, client)); } - static OpMsgRequest fromDBAndBody(StringData db, BSONObj body, const BSONObj& extraFields = {}); + static OpMsgRequest fromDBAndBody(StringData db, + BSONObj body, + const BSONObj& extraFields = {}) { + OpMsgRequest request; + request.body = ([&] { + BSONObjBuilder bodyBuilder(std::move(body)); + bodyBuilder.appendElements(extraFields); + bodyBuilder.append("$db", db); + return bodyBuilder.obj(); + }()); + return request; + } StringData getDatabase() const { if (auto elem = body["$db"]) @@ -200,8 +211,6 @@ struct OpMsgRequest : public OpMsg { return body.firstElementFieldName(); } - void setDollarTenant(const TenantId& tenant); - // DO NOT ADD MEMBERS! Since this type is essentially a strong typedef (see the class comment), // it should not hold more data than an OpMsg. It should be freely interconvertible with OpMsg // without issues like slicing. 
@@ -408,15 +417,4 @@ private: const int _sizeOffset; }; -/** - * Builds an OpMsgRequest object. - */ -struct OpMsgRequestBuilder { -public: - static OpMsgRequest create(StringData db, BSONObj body, const BSONObj& extraFields = {}); - static OpMsgRequest create(const DatabaseName& dbName, - BSONObj body, - const BSONObj& extraFields = {}); -}; - } // namespace mongo diff --git a/src/mongo/rpc/op_msg_test.cpp b/src/mongo/rpc/op_msg_test.cpp index d5287b18144..ea07a42ea31 100644 --- a/src/mongo/rpc/op_msg_test.cpp +++ b/src/mongo/rpc/op_msg_test.cpp @@ -43,9 +43,7 @@ #include "mongo/db/jsobj.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" #include "mongo/unittest/log_test.h" #include "mongo/unittest/unittest.h" #include "mongo/util/hex.h" @@ -818,7 +816,7 @@ protected: }; TEST_F(OpMsgWithAuth, ParseValidatedTenancyScopeFromSecurityToken) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + gMultitenancySupport = true; const auto kTenantId = TenantId(OID::gen()); const auto token = makeSecurityToken(UserName("user", "admin", kTenantId)); @@ -847,7 +845,7 @@ TEST_F(OpMsgWithAuth, ParseValidatedTenancyScopeFromSecurityToken) { } TEST_F(OpMsgWithAuth, ParseValidatedTenancyScopeFromDollarTenant) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + gMultitenancySupport = true; AuthorizationSessionImplTestHelper::grantUseTenant(*(client.get())); const auto kTenantId = TenantId(OID::gen()); @@ -872,7 +870,7 @@ TEST_F(OpMsgWithAuth, ParseValidatedTenancyScopeFromDollarTenant) { } TEST_F(OpMsgWithAuth, ValidatedTenancyScopeShouldNotBeSerialized) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); + gMultitenancySupport = true; 
AuthorizationSessionImplTestHelper::grantUseTenant(*(client.get())); const auto kTenantId = TenantId(OID::gen()); @@ -936,44 +934,6 @@ TEST(OpMsgRequest, GetDatabaseThrowsMissing) { ASSERT_THROWS(msg.getDatabase(), AssertionException); } -TEST(OpMsgRequestBuilder, WithTenantInDatabaseName) { - const TenantId tenantId(OID::gen()); - auto const body = fromjson("{ping: 1}"); - OpMsgRequest msg = OpMsgRequestBuilder::create({tenantId, "testDb"}, body); - ASSERT_EQ(msg.body.getField("$tenant").eoo(), false); - ASSERT_EQ(TenantId::parseFromBSON(msg.body.getField("$tenant")), tenantId); -} - -TEST(OpMsgRequestBuilder, WithSameTenantInBody) { - const TenantId tenantId(OID::gen()); - auto const body = BSON("ping" << 1 << "$tenant" << tenantId); - OpMsgRequest msg = OpMsgRequestBuilder::create({tenantId, "testDb"}, body); - ASSERT_EQ(msg.body.getField("$tenant").eoo(), false); - ASSERT_EQ(TenantId::parseFromBSON(msg.body.getField("$tenant")), tenantId); -} - -TEST(OpMsgRequestBuilder, FailWithDiffTenantInBody) { - const TenantId tenantId(OID::gen()); - const TenantId otherTenantId(OID::gen()); - - auto const body = BSON("ping" << 1 << "$tenant" << tenantId); - ASSERT_THROWS_CODE( - OpMsgRequestBuilder::create({otherTenantId, "testDb"}, body), DBException, 8423373); -} - -TEST(OpMsgRequestBuilder, FromDatabaseNameAndBodyDoesNotCopy) { - const TenantId tenantId(OID::gen()); - auto body = fromjson("{ping: 1}"); - const void* const bodyPtr = body.objdata(); - auto msg = OpMsgRequestBuilder::create({tenantId, "db"}, std::move(body)); - - auto const newBody = BSON("ping" << 1 << "$db" - << "db" - << "$tenant" << tenantId); - ASSERT_BSONOBJ_EQ(msg.body, newBody); - ASSERT_EQ(static_cast(msg.body.objdata()), bodyPtr); -} - TEST(OpMsgRequest, FromDbAndBodyDoesNotCopy) { auto body = fromjson("{ping: 1}"); const void* const bodyPtr = body.objdata(); @@ -983,52 +943,6 @@ TEST(OpMsgRequest, FromDbAndBodyDoesNotCopy) { ASSERT_EQ(static_cast(msg.body.objdata()), bodyPtr); } 
-TEST(OpMsgRequest, SetDollarTenantHasSameTenant) { - const TenantId tenantId(OID::gen()); - // Set $tenant on a OpMsgRequest which already has the same $tenant. - OpMsgRequest request; - request.body = BSON("ping" << 1 << "$tenant" << tenantId << "$db" - << "testDb"); - request.setDollarTenant(tenantId); - - auto dollarTenant = request.body.getField("$tenant"); - ASSERT(!dollarTenant.eoo()); - ASSERT_EQ(TenantId::parseFromBSON(dollarTenant), tenantId); -} - -TEST(OpMsgRequest, SetDollarTenantHasNoTenant) { - const TenantId tenantId(OID::gen()); - // Set $tenant on a OpMsgRequest which has no $tenant. - OpMsgRequest request; - request.body = BSON("ping" << 1 << "$db" - << "testDb"); - request.setDollarTenant(tenantId); - - auto dollarTenant = request.body.getField("$tenant"); - ASSERT(!dollarTenant.eoo()); - ASSERT_EQ(TenantId::parseFromBSON(dollarTenant), tenantId); -} - -TEST(OpMsgRequest, SetDollarTenantFailWithDiffTenant) { - const TenantId tenantId(OID::gen()); - auto const body = BSON("ping" << 1 << "$tenant" << tenantId); - auto request = OpMsgRequest::fromDBAndBody("testDb", body); - ASSERT_THROWS_CODE(request.setDollarTenant(TenantId(OID::gen())), DBException, 8423373); -} - -TEST_F(OpMsgWithAuth, SetDollarTenantFailWithVTS) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); - AuthorizationSessionImplTestHelper::grantUseTenant(*(client.get())); - - const auto kTenantId = TenantId(OID::gen()); - const auto body = BSON("ping" << 1 << "$tenant" << kTenantId); - auto msg = OpMsgBytes{kNoFlags, kBodySection, body}.parse(client.get()); - OpMsgRequest request(std::move(msg)); - - ASSERT(request.validatedTenancyScope); - ASSERT_THROWS_CODE(request.setDollarTenant(TenantId(OID::gen())), DBException, 8423372); -} - TEST(OpMsgTest, ChecksumResizesMessage) { auto msg = OpMsgBytes{kNoFlags, // kBodySection, diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript index 142287abd0f..af3f2361f1b 100644 --- 
a/src/mongo/s/SConscript +++ b/src/mongo/s/SConscript @@ -136,7 +136,6 @@ env.Library( target='common_s', source=[ 'analyze_shard_key_cmd.idl', - 'analyze_shard_key_documents.idl', 'analyze_shard_key_feature_flag.idl', 'analyze_shard_key_server_parameters.idl', 'cannot_implicitly_create_collection_info.cpp', @@ -215,7 +214,6 @@ env.Library( '$BUILD_DIR/mongo/db/query/query_request', '$BUILD_DIR/mongo/db/repl/optime', '$BUILD_DIR/mongo/db/server_options', - '$BUILD_DIR/mongo/rpc/message', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/server_base', diff --git a/src/mongo/s/analyze_shard_key_documents.idl b/src/mongo/s/analyze_shard_key_documents.idl deleted file mode 100644 index c7254322a9e..00000000000 --- a/src/mongo/s/analyze_shard_key_documents.idl +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2022-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. 
If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -global: - cpp_namespace: "mongo" - -imports: - - "mongo/db/basic_types.idl" - - "mongo/s/configure_query_analyzer_cmd.idl" - -structs: - QueryAnalyzerDocument: - description: "Represents settings for query sampling for one collection." - fields: - _id: - type: uuid - description: "The UUID of the collection being sampled." - cpp_name: collectionUuid - ns: - type: namespacestring - description: "The namespace of the collectiong being sampled." - inline_chained_structs: true - chained_structs: - queryAnalyzerConfiguration: configuration diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h index 4567e6cdada..469a63a218c 100644 --- a/src/mongo/s/catalog/type_collection.h +++ b/src/mongo/s/catalog/type_collection.h @@ -30,8 +30,6 @@ #pragma once #include "mongo/s/catalog/type_collection_gen.h" -#include "mongo/s/chunk_version.h" -#include "mongo/s/index_version.h" namespace mongo { @@ -113,7 +111,6 @@ public: using CollectionTypeBase::getUuid; using CollectionTypeBase::setDefragmentationPhase; using CollectionTypeBase::setDefragmentCollection; - using CollectionTypeBase::setIndexVersion; using CollectionTypeBase::setKeyPattern; using CollectionTypeBase::setNss; using CollectionTypeBase::setReshardingFields; @@ -175,15 +172,8 @@ public: CollectionTypeBase::setAllowMigrations(false); } - CollectionIndexes getIndexVersion() const { - return CollectionIndexes({getEpoch(), CollectionTypeBase::getTimestamp()}, - CollectionTypeBase::getIndexVersion()); - } - - void setIndexVersion(CollectionIndexes indexVersion) { - setEpoch(indexVersion.epoch()); - setTimestamp(indexVersion.getTimestamp()); - CollectionTypeBase::setIndexVersion(indexVersion.indexVersion()); + Timestamp getIndexVersion() const { + return 
CollectionTypeBase::getIndexVersion().get_value_or(Timestamp(0, 0)); } // TODO SERVER-61033: remove after permitMigrations have been merge with allowMigrations. diff --git a/src/mongo/s/catalog/type_collection.idl b/src/mongo/s/catalog/type_collection.idl index 89661a8fba4..3f44a15a349 100644 --- a/src/mongo/s/catalog/type_collection.idl +++ b/src/mongo/s/catalog/type_collection.idl @@ -158,5 +158,6 @@ structs: indexVersion: type: timestamp description: "Current collection index version. It will tick everytime a global index - is created or dropped." + is created or dropped. If not set, then we can assume it's value is + Timestamp(0, 0)." optional: true diff --git a/src/mongo/s/catalog/type_namespace_placement.idl b/src/mongo/s/catalog/type_namespace_placement.idl index 70efc024470..3e724b9481f 100644 --- a/src/mongo/s/catalog/type_namespace_placement.idl +++ b/src/mongo/s/catalog/type_namespace_placement.idl @@ -46,12 +46,6 @@ structs: type: namespacestring description: "The namespace (database or full collection name) referenced by this object." - uuid: - cpp_name: uuid - type: uuid - description: "When nss references a collection, this field stores the uuid assigned - to such collection at the time of its creation." 
- optional: true timestamp: type: timestamp description: "The point in time at which this version of NamespacePlacementType diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp index ba0a6826933..66025644cd7 100644 --- a/src/mongo/s/catalog_cache.cpp +++ b/src/mongo/s/catalog_cache.cpp @@ -33,7 +33,6 @@ #include "mongo/db/curop.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/repl/optime_with.h" -#include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" @@ -55,13 +54,12 @@ namespace { MONGO_FAIL_POINT_DEFINE(blockCollectionCacheLookup); -// How many times to try refreshing the routing info or the index info of a collection if the -// information loaded from the config server is found to be inconsistent. -const int kMaxInconsistentCollectionRefreshAttempts = 3; +// How many times to try refreshing the routing info if the set of chunks loaded from the config +// server is found to be inconsistent. 
+const int kMaxInconsistentRoutingInfoRefreshAttempts = 3; const int kDatabaseCacheSize = 10000; const int kCollectionCacheSize = 10000; -const int kIndexCacheSize = 10000; const OperationContext::Decoration operationShouldBlockBehindCatalogCacheRefresh = OperationContext::declareDecoration(); @@ -236,8 +234,7 @@ CatalogCache::CatalogCache(ServiceContext* const service, CatalogCacheLoader& ca return options; }())), _databaseCache(service, *_executor, _cacheLoader), - _collectionCache(service, *_executor, _cacheLoader), - _indexCache(service, *_executor) { + _collectionCache(service, *_executor, _cacheLoader) { _executor->startup(); } @@ -369,7 +366,7 @@ StatusWith CatalogCache::_getCollectionRoutingInfoAt( "namespace"_attr = nss, "exception"_attr = redact(ex)); acquireTries++; - if (acquireTries == kMaxInconsistentCollectionRefreshAttempts) { + if (acquireTries == kMaxInconsistentRoutingInfoRefreshAttempts) { return ex.toStatus(); } } @@ -394,92 +391,6 @@ StatusWith CatalogCache::getCollectionRoutingInfoAt(OperationConte return _getCollectionRoutingInfoAt(opCtx, nss, atClusterTime, false); } -GlobalIndexesCache CatalogCache::getCollectionIndexInfo(OperationContext* opCtx, - const NamespaceString& nss, - bool allowLocks) { - return _getCollectionIndexInfoAt(opCtx, nss, boost::none, allowLocks); -} - -GlobalIndexesCache CatalogCache::getCollectionIndexInfoAt(OperationContext* opCtx, - const NamespaceString& nss, - Timestamp atClusterTime) { - return _getCollectionIndexInfoAt(opCtx, nss, atClusterTime, false); -} - -GlobalIndexesCache CatalogCache::_getCollectionIndexInfoAt(OperationContext* opCtx, - const NamespaceString& nss, - boost::optional atClusterTime, - bool allowLocks) { - if (!allowLocks) { - invariant(!opCtx->lockState() || !opCtx->lockState()->isLocked(), - "Do not hold a lock while refreshing the catalog cache. 
Doing so would " - "potentially hold " - "the lock during a network call, and can lead to a deadlock as described in " - "SERVER-37398."); - } - - const auto swDbInfo = getDatabase(opCtx, nss.db(), allowLocks); - if (!swDbInfo.isOK()) { - if (swDbInfo == ErrorCodes::NamespaceNotFound) { - LOGV2_FOR_CATALOG_REFRESH( - 6686300, - 2, - "Invalidating cached index entry because its database has been dropped", - "namespace"_attr = nss); - invalidateIndexEntry_LINEARIZABLE(nss); - } - uasserted(swDbInfo.getStatus().code(), - str::stream() << "Error getting database info for index refresh: " - << swDbInfo.getStatus().reason()); - } - - const auto dbInfo = std::move(swDbInfo.getValue()); - - auto indexEntryFuture = _indexCache.acquireAsync(nss, CacheCausalConsistency::kLatestKnown); - - if (allowLocks) { - // When allowLocks is true we may be holding a lock, so we don't - // want to block the current thread: if the future is ready let's - // use it, otherwise return an error - uassert(ShardCannotRefreshDueToLocksHeldInfo(nss), - "Index info refresh did not complete", - indexEntryFuture.isReady()); - return *indexEntryFuture.get(opCtx); - } - - // From this point we can guarantee that allowLocks is false - size_t acquireTries = 0; - - operationBlockedBehindCatalogCacheRefresh(opCtx) = true; - - while (true) { - try { - auto indexEntry = indexEntryFuture.get(opCtx); - - return std::move(*indexEntry); - } catch (const DBException& ex) { - bool isCatalogCacheRetriableError = ex.isA() || - ex.code() == ErrorCodes::ConflictingOperationInProgress || - ex.code() == ErrorCodes::QueryPlanKilled; - if (!isCatalogCacheRetriableError) { - throw; - } - - LOGV2_FOR_CATALOG_REFRESH(6686301, - 0, - "Index refresh failed", - "namespace"_attr = nss, - "exception"_attr = redact(ex)); - acquireTries++; - if (acquireTries == kMaxInconsistentCollectionRefreshAttempts) { - throw; - } - } - - indexEntryFuture = _indexCache.acquireAsync(nss, CacheCausalConsistency::kLatestKnown); - } -} - StatusWith 
CatalogCache::getDatabaseWithRefresh(OperationContext* opCtx, StringData dbName) { _databaseCache.advanceTimeInStore( @@ -495,14 +406,6 @@ StatusWith CatalogCache::getCollectionRoutingInfoWithRefresh( return getCollectionRoutingInfo(opCtx, nss); } -GlobalIndexesCache CatalogCache::getCollectionIndexInfoWithRefresh(OperationContext* opCtx, - const NamespaceString& nss) { - _indexCache.advanceTimeInStore( - nss, ComparableIndexVersion::makeComparableIndexVersionForForcedRefresh()); - setOperationShouldBlockBehindCatalogCacheRefresh(opCtx, true); - return getCollectionIndexInfo(opCtx, nss); -} - ChunkManager CatalogCache::getShardedCollectionRoutingInfo(OperationContext* opCtx, const NamespaceString& nss) { auto cm = uassertStatusOK(getCollectionRoutingInfo(opCtx, nss)); @@ -559,15 +462,9 @@ void CatalogCache::invalidateShardOrEntireCollectionEntryForShardedCollection( ? ComparableChunkVersion::makeComparableChunkVersion(*wantedVersion) : ComparableChunkVersion::makeComparableChunkVersionForForcedRefresh(); - const bool routingInfoTimeAdvanced = _collectionCache.advanceTimeInStore(nss, newChunkVersion); + const bool timeAdvanced = _collectionCache.advanceTimeInStore(nss, newChunkVersion); - const auto newIndexVersion = wantedVersion - ? ComparableIndexVersion::makeComparableIndexVersion(*wantedVersion) - : ComparableIndexVersion::makeComparableIndexVersionForForcedRefresh(); - - _indexCache.advanceTimeInStore(nss, newIndexVersion); - - if (routingInfoTimeAdvanced && collectionEntry && collectionEntry->optRt) { + if (timeAdvanced && collectionEntry && collectionEntry->optRt) { // Shards marked stale will be reset on the next refresh. // We can mark the shard stale only if the time advanced, otherwise no refresh would happen // and the shard will remain marked stale. 
@@ -616,13 +513,11 @@ void CatalogCache::purgeDatabase(StringData dbName) { _databaseCache.invalidateKey(dbName); _collectionCache.invalidateKeyIf( [&](const NamespaceString& nss) { return nss.db() == dbName; }); - _indexCache.invalidateKeyIf([&](const NamespaceString& nss) { return nss.db() == dbName; }); } void CatalogCache::purgeAllDatabases() { _databaseCache.invalidateAll(); _collectionCache.invalidateAll(); - _indexCache.invalidateAll(); } void CatalogCache::report(BSONObjBuilder* builder) const { @@ -630,11 +525,9 @@ void CatalogCache::report(BSONObjBuilder* builder) const { const size_t numDatabaseEntries = _databaseCache.getCacheInfo().size(); const size_t numCollectionEntries = _collectionCache.getCacheInfo().size(); - const size_t numIndexEntries = _indexCache.getCacheInfo().size(); cacheStatsBuilder.append("numDatabaseEntries", static_cast(numDatabaseEntries)); cacheStatsBuilder.append("numCollectionEntries", static_cast(numCollectionEntries)); - cacheStatsBuilder.append("numIndexEntries", static_cast(numIndexEntries)); _stats.report(&cacheStatsBuilder); _collectionCache.reportStats(&cacheStatsBuilder); @@ -679,10 +572,6 @@ void CatalogCache::invalidateCollectionEntry_LINEARIZABLE(const NamespaceString& _collectionCache.invalidateKey(nss); } -void CatalogCache::invalidateIndexEntry_LINEARIZABLE(const NamespaceString& nss) { - _indexCache.invalidateKey(nss); -} - void CatalogCache::Stats::report(BSONObjBuilder* builder) const { builder->append("countStaleConfigErrors", countStaleConfigErrors.load()); @@ -902,67 +791,4 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look } } -CatalogCache::IndexCache::IndexCache(ServiceContext* service, ThreadPoolInterface& threadPool) - : ReadThroughCache(_mutex, - service, - threadPool, - [this](OperationContext* opCtx, - const NamespaceString& nss, - const ValueHandle& indexes, - const ComparableIndexVersion& previousIndexVersion) { - return _lookupIndexes(opCtx, nss, indexes, 
previousIndexVersion); - }, - kIndexCacheSize) {} - -CatalogCache::IndexCache::LookupResult CatalogCache::IndexCache::_lookupIndexes( - OperationContext* opCtx, - const NamespaceString& nss, - const ValueHandle& indexes, - const ComparableIndexVersion& previousVersion) { - // This object will define the new time of the index info obtained by this refresh - auto newComparableVersion = - ComparableIndexVersion::makeComparableIndexVersion(CollectionIndexes::UNSHARDED()); - - try { - LOGV2_FOR_CATALOG_REFRESH(6686302, - 1, - "Refreshing cached indexes", - "namespace"_attr = nss, - "timeInStore"_attr = previousVersion); - - const auto readConcern = [&]() -> repl::ReadConcernArgs { - if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) { - return {repl::ReadConcernLevel::kSnapshotReadConcern}; - } else { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; - } - }(); - auto collAndIndexes = Grid::get(opCtx)->catalogClient()->getCollectionAndGlobalIndexes( - opCtx, nss, readConcern); - const auto& coll = collAndIndexes.first; - newComparableVersion.setCollectionIndexes(coll.getIndexVersion()); - IndexCatalogTypeMap newIndexesMap; - for (const auto& index : collAndIndexes.second) { - newIndexesMap[index.getName()] = index; - } - - LOGV2_FOR_CATALOG_REFRESH(6686303, - newComparableVersion != previousVersion ? 
0 : 1, - "Refreshed cached indexes", - "namespace"_attr = nss, - "newVersion"_attr = newComparableVersion, - "timeInStore"_attr = previousVersion); - return LookupResult( - GlobalIndexesCache(coll.getIndexVersion().indexVersion(), std::move(newIndexesMap)), - std::move(newComparableVersion)); - } catch (const DBException& ex) { - LOGV2_FOR_CATALOG_REFRESH(6686304, - 0, - "Error refreshing cached indexes", - "namespace"_attr = nss, - "error"_attr = redact(ex)); - throw; - } -} } // namespace mongo diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h index 36fdbd716fa..f7f9fec91dd 100644 --- a/src/mongo/s/catalog_cache.h +++ b/src/mongo/s/catalog_cache.h @@ -33,10 +33,8 @@ #include "mongo/db/jsobj.h" #include "mongo/platform/atomic_word.h" #include "mongo/s/catalog/type_database_gen.h" -#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog_cache_loader.h" #include "mongo/s/chunk_manager.h" -#include "mongo/s/global_index_cache.h" #include "mongo/s/shard_version.h" #include "mongo/s/type_collection_common_types_gen.h" #include "mongo/util/concurrency/thread_pool.h" @@ -178,32 +176,6 @@ public: const NamespaceString& nss, bool allowLocks = false); - /** - * Blocking method to get the global index information for a specific collection at a given - * cluster time. - * - * Returns a list of global indexes that may be empty is no global indexes exist. Throws if an - * error occurs while loading the metadata, returns a failed status. - * - * If the given atClusterTime is so far in the past that it is not possible to construct index - * info, returns a StaleClusterTime error. - */ - GlobalIndexesCache getCollectionIndexInfoAt(OperationContext* opCtx, - const NamespaceString& nss, - Timestamp atClusterTime); - - /** - * Same as the getCollectionIndexInfoAt call above, but returns the latest known index - * information for the specified namespace. 
- * - * While this method may fail under the same circumstances as getCollectionIndexInfoAt, it is - * guaranteed to never throw StaleClusterTime, because the latest index information should - * always be available. - */ - virtual GlobalIndexesCache getCollectionIndexInfo(OperationContext* opCtx, - const NamespaceString& nss, - bool allowLocks = false); - /** * Same as getDatbase above, but in addition forces the database entry to be refreshed. */ @@ -216,11 +188,6 @@ public: StatusWith getCollectionRoutingInfoWithRefresh(OperationContext* opCtx, const NamespaceString& nss); - /** - * Same as getCollectionIndexInfo above, but in addition causes the namespace to be refreshed. - */ - GlobalIndexesCache getCollectionIndexInfoWithRefresh(OperationContext* opCtx, - const NamespaceString& nss); /** * Same as getCollectionRoutingInfo above, but throws NamespaceNotSharded error if the namespace @@ -308,8 +275,6 @@ public: */ void invalidateCollectionEntry_LINEARIZABLE(const NamespaceString& nss); - void invalidateIndexEntry_LINEARIZABLE(const NamespaceString& nss); - private: class DatabaseCache : public DatabaseTypeCache { public: @@ -372,28 +337,11 @@ private: void _updateRefreshesStats(bool isIncremental, bool add); }; - class IndexCache : public GlobalIndexesCacheBase { - public: - IndexCache(ServiceContext* service, ThreadPoolInterface& threadPool); - - private: - LookupResult _lookupIndexes(OperationContext* opCtx, - const NamespaceString& nss, - const ValueHandle& indexes, - const ComparableIndexVersion& previousIndexVersion); - Mutex _mutex = MONGO_MAKE_LATCH("IndexCache::_mutex"); - }; - StatusWith _getCollectionRoutingInfoAt(OperationContext* opCtx, const NamespaceString& nss, boost::optional atClusterTime, bool allowLocks = false); - GlobalIndexesCache _getCollectionIndexInfoAt(OperationContext* opCtx, - const NamespaceString& nss, - boost::optional atClusterTime, - bool allowLocks = false); - // Interface from which chunks will be retrieved CatalogCacheLoader& 
_cacheLoader; @@ -404,8 +352,6 @@ private: CollectionCache _collectionCache; - IndexCache _indexCache; - /** * Encapsulates runtime statistics across all databases and collections in this catalog cache */ diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp index 698b6fded46..d13273f005c 100644 --- a/src/mongo/s/catalog_cache_test.cpp +++ b/src/mongo/s/catalog_cache_test.cpp @@ -128,7 +128,7 @@ protected: } } - CollectionType loadCollection(const ShardVersion& version) { + void loadCollection(const ChunkVersion& version) { const auto coll = makeCollectionType(version); const auto scopedCollProv = scopedCollectionProvider(coll); const auto scopedChunksProv = scopedChunksProvider(makeChunks(version)); @@ -136,15 +136,6 @@ protected: const auto swChunkManager = _catalogCache->getCollectionRoutingInfo(operationContext(), coll.getNss()); ASSERT_OK(swChunkManager.getStatus()); - auto future = launchAsync([&] { - onCommand([&](const executor::RemoteCommandRequest& request) { - return makeCollectionAndIndexesAggregationResponse(coll, std::vector()); - }); - }); - const auto globalIndexesCache = - _catalogCache->getCollectionIndexInfo(operationContext(), coll.getNss()); - future.default_timed_get(); - return coll; } void loadUnshardedCollection(const NamespaceString& nss) { @@ -166,25 +157,13 @@ protected: return {chunk}; } - CollectionType makeCollectionType(const ShardVersion& collVersion) { - CollectionType coll{kNss, - collVersion.epoch(), - collVersion.getTimestamp(), - Date_t::now(), - kUUID, - kShardKeyPattern.getKeyPattern()}; - coll.setIndexVersion(collVersion); - return coll; - } - - BSONObj makeCollectionAndIndexesAggregationResponse(const CollectionType& coll, - const std::vector& indexes) { - BSONObj obj = coll.toBSON(); - BSONObjBuilder indexField; - indexField.append("indexes", indexes); - BSONObj newObj = obj.addField(indexField.obj().firstElement()); - return CursorResponse(CollectionType::ConfigNS, CursorId{0}, {newObj}) - 
.toBSON(CursorResponse::ResponseType::InitialResponse); + CollectionType makeCollectionType(const ChunkVersion& collVersion) { + return {kNss, + collVersion.epoch(), + collVersion.getTimestamp(), + Date_t::now(), + kUUID, + kShardKeyPattern.getKeyPattern()}; } const NamespaceString kNss{"catalgoCacheTestDB.foo"}; @@ -293,9 +272,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) { TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) { const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - const CollectionGeneration gen(OID::gen(), Timestamp(1, 1)); - const auto cachedCollVersion = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none)); + const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}); loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); loadCollection(cachedCollVersion); @@ -306,13 +283,11 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) { ASSERT(status == ErrorCodes::InternalError); } -TEST_F(CatalogCacheTest, OnStaleShardVersionWithGreaterPlacementVersion) { +TEST_F(CatalogCacheTest, OnStaleShardVersionWithGraterVersion) { const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - const CollectionGeneration gen(OID::gen(), Timestamp(1, 1)); - const auto cachedCollVersion = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none)); - const auto wantedCollVersion = - ShardVersion(ChunkVersion(gen, {2, 0}), CollectionIndexes(gen, boost::none)); + const auto cachedCollVersion = ChunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}); + const auto wantedCollVersion = ShardVersion(ChunkVersion(cachedCollVersion, {2, 0}), + CollectionIndexes(cachedCollVersion, boost::none)); loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); loadCollection(cachedCollVersion); @@ -325,9 +300,8 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithGreaterPlacementVersion) { TEST_F(CatalogCacheTest, 
TimeseriesFieldsAreProperlyPropagatedOnCC) { const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - const auto gen = CollectionGeneration(OID::gen(), Timestamp(42)); - const auto version = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none)); + const auto epoch = OID::gen(); + const auto version = ChunkVersion({epoch, Timestamp(42)}, {1, 0}); loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); @@ -382,9 +356,8 @@ TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) { TEST_F(CatalogCacheTest, LookupCollectionWithInvalidOptions) { const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - const auto gen = CollectionGeneration(OID::gen(), Timestamp(42)); - const auto version = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none)); + const auto epoch = OID::gen(); + const auto version = ChunkVersion({epoch, Timestamp(42)}, {1, 0}); loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); @@ -400,59 +373,5 @@ TEST_F(CatalogCacheTest, LookupCollectionWithInvalidOptions) { ASSERT_EQUALS(swChunkManager.getStatus(), ErrorCodes::InvalidOptions); } - -TEST_F(CatalogCacheTest, OnStaleShardVersionWithGreaterIndexVersion) { - const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - const CollectionGeneration gen(OID::gen(), Timestamp(1, 1)); - const auto cachedCollVersion = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, boost::none)); - const auto wantedCollVersion = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, Timestamp(1, 0))); - - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); - CollectionType coll = loadCollection(cachedCollVersion); - _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( - kNss, wantedCollVersion, kShards[0]); - - auto future = launchAsync([&] { - onCommand([&](const executor::RemoteCommandRequest& request) { - 
coll.setIndexVersion({{coll.getEpoch(), coll.getTimestamp()}, Timestamp(1, 0)}); - return makeCollectionAndIndexesAggregationResponse( - coll, - {IndexCatalogType("x_1", BSON("x" << 1), BSONObj(), Timestamp(1, 0), coll.getUuid()) - .toBSON()}); - }); - }); - - const auto indexInfo = _catalogCache->getCollectionIndexInfo(operationContext(), kNss); - future.default_timed_get(); -} - -TEST_F(CatalogCacheTest, OnStaleShardVersionIndexVersionBumpNotNone) { - const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - const CollectionGeneration gen(OID::gen(), Timestamp(1, 1)); - const auto cachedCollVersion = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, Timestamp(1, 0))); - const auto wantedCollVersion = - ShardVersion(ChunkVersion(gen, {1, 0}), CollectionIndexes(gen, Timestamp(2, 0))); - - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); - CollectionType coll = loadCollection(cachedCollVersion); - _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( - kNss, wantedCollVersion, kShards[0]); - - auto future = launchAsync([&] { - onCommand([&](const executor::RemoteCommandRequest& request) { - coll.setIndexVersion({{coll.getEpoch(), coll.getTimestamp()}, Timestamp(2, 0)}); - return makeCollectionAndIndexesAggregationResponse( - coll, - {IndexCatalogType("x_1", BSON("x" << 1), BSONObj(), Timestamp(2, 0), coll.getUuid()) - .toBSON()}); - }); - }); - - const auto indexInfo = _catalogCache->getCollectionIndexInfo(operationContext(), kNss); - future.default_timed_get(); -} } // namespace } // namespace mongo diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp index fe58ca2e52a..f44c58c8760 100644 --- a/src/mongo/s/chunk_manager.cpp +++ b/src/mongo/s/chunk_manager.cpp @@ -930,11 +930,11 @@ bool ComparableChunkVersion::operator<(const ComparableChunkVersion& other) cons } ShardEndpoint::ShardEndpoint(const ShardId& shardName, - boost::optional shardVersionParam, - 
boost::optional dbVersionParam) + boost::optional shardVersion, + boost::optional dbVersion) : shardName(shardName), - shardVersion(std::move(shardVersionParam)), - databaseVersion(std::move(dbVersionParam)) { + shardVersion(std::move(shardVersion)), + databaseVersion(std::move(dbVersion)) { if (databaseVersion) invariant(shardVersion && *shardVersion == ShardVersion::UNSHARDED()); else if (shardVersion) diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h index 909c490f755..004a6a966f6 100644 --- a/src/mongo/s/chunk_manager.h +++ b/src/mongo/s/chunk_manager.h @@ -470,8 +470,8 @@ using RoutingTableHistoryValueHandle = RoutingTableHistoryCache::ValueHandle; */ struct ShardEndpoint { ShardEndpoint(const ShardId& shardName, - boost::optional shardVersionParam, - boost::optional dbVersionParam); + boost::optional shardVersion, + boost::optional dbVersion); ShardId shardName; diff --git a/src/mongo/s/collection_uuid_mismatch.cpp b/src/mongo/s/collection_uuid_mismatch.cpp index 4df6f92067e..da2d8dc6229 100644 --- a/src/mongo/s/collection_uuid_mismatch.cpp +++ b/src/mongo/s/collection_uuid_mismatch.cpp @@ -54,20 +54,20 @@ Status populateCollectionUUIDMismatch(OperationContext* opCtx, opCtx = alternativeOpCtx.get(); AlternativeClientRegion acr{client}; - auto swDbInfo = - Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, info->dbName().toStringWithTenantId()); + auto swDbInfo = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, info->db()); if (!swDbInfo.isOK()) { return swDbInfo.getStatus(); } ListCollections listCollections; // Empty tenant id is acceptable here as command's tenant id will not be serialized to BSON. - listCollections.setDbName(info->dbName()); + // TODO SERVER-68357: Use database name of CollectionUUIDMismatchInfo. 
+ listCollections.setDbName(DatabaseName(boost::none, info->db())); listCollections.setFilter(BSON("info.uuid" << info->collectionUUID())); auto response = executeCommandAgainstDatabasePrimary(opCtx, - info->dbName().db(), + info->db(), swDbInfo.getValue(), listCollections.toBSON({}), ReadPreferenceSetting{ReadPreference::PrimaryOnly}, @@ -83,7 +83,7 @@ Status populateCollectionUUIDMismatch(OperationContext* opCtx, if (auto actualCollectionElem = dotted_path_support::extractElementAtPath( response.swResponse.getValue().data, "cursor.firstBatch.0.name")) { - return {CollectionUUIDMismatchInfo{info->dbName(), + return {CollectionUUIDMismatchInfo{info->db(), info->collectionUUID(), info->expectedCollection(), actualCollectionElem.str()}, diff --git a/src/mongo/s/commands/cluster_abort_transaction_cmd.h b/src/mongo/s/commands/cluster_abort_transaction_cmd.h index 64d58dfdaa6..734e3363f90 100644 --- a/src/mongo/s/commands/cluster_abort_transaction_cmd.h +++ b/src/mongo/s/commands/cluster_abort_transaction_cmd.h @@ -102,8 +102,8 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { return Impl::checkAuthForOperation(opCtx); } diff --git a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp index 146778efdc0..8b73ac91430 100644 --- a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp +++ b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp @@ -101,11 +101,12 @@ public: auto swDbInfo = Grid::get(opCtx)->catalogCache()->getDatabase( opCtx, cmd.getDbName().toStringWithTenantId()); if (swDbInfo == ErrorCodes::NamespaceNotFound) { - uassert( - CollectionUUIDMismatchInfo( - cmd.getDbName(), *cmd.getCollectionUUID(), nss.coll().toString(), boost::none), - "Database does not exist", - !cmd.getCollectionUUID()); + uassert(CollectionUUIDMismatchInfo(cmd.getDbName().toString(), + 
*cmd.getCollectionUUID(), + nss.coll().toString(), + boost::none), + "Database does not exist", + !cmd.getCollectionUUID()); } const auto dbInfo = uassertStatusOK(swDbInfo); diff --git a/src/mongo/s/commands/cluster_commit_transaction_cmd.h b/src/mongo/s/commands/cluster_commit_transaction_cmd.h index bf6f51da6e8..926c2bbe6eb 100644 --- a/src/mongo/s/commands/cluster_commit_transaction_cmd.h +++ b/src/mongo/s/commands/cluster_commit_transaction_cmd.h @@ -83,8 +83,8 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { return Impl::checkAuthForOperation(opCtx); } diff --git a/src/mongo/s/commands/cluster_db_stats_cmd.cpp b/src/mongo/s/commands/cluster_db_stats_cmd.cpp index 16c3d17f91d..6e4674b4c5b 100644 --- a/src/mongo/s/commands/cluster_db_stats_cmd.cpp +++ b/src/mongo/s/commands/cluster_db_stats_cmd.cpp @@ -113,10 +113,10 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbname, - const BSONObj&) const final { + const std::string& dbname, + const BSONObj& cmdObj) const final { auto as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname.db()), + if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), ActionType::dbStats)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/cluster_drop_collection_cmd.cpp b/src/mongo/s/commands/cluster_drop_collection_cmd.cpp index 3651dcb1253..ef1bfe63c7b 100644 --- a/src/mongo/s/commands/cluster_drop_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_drop_collection_cmd.cpp @@ -119,7 +119,7 @@ public: // Ensure our reply conforms to the IDL-defined reply structure. 
return DropReply::parse(IDLParserContext{"drop"}, resultObj); } catch (const ExceptionFor&) { - uassert(CollectionUUIDMismatchInfo(request().getDbName(), + uassert(CollectionUUIDMismatchInfo(request().getDbName().toString(), *request().getCollectionUUID(), request().getNamespace().coll().toString(), boost::none), diff --git a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp index 8920eb793be..b109fdcc556 100644 --- a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp @@ -101,7 +101,7 @@ public: auto catalogCache = Grid::get(opCtx)->catalogCache(); auto swDbInfo = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, fromNss.db()); if (swDbInfo == ErrorCodes::NamespaceNotFound) { - uassert(CollectionUUIDMismatchInfo(fromNss.dbName(), + uassert(CollectionUUIDMismatchInfo(fromNss.db().toString(), *request().getCollectionUUID(), fromNss.coll().toString(), boost::none), diff --git a/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp b/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp index 72466b3fa23..45b0652e37f 100644 --- a/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp +++ b/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp @@ -94,8 +94,8 @@ public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { + const std::string& dbname, + const BSONObj& cmdObj) const override { if (!AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), ActionType::setDefaultRWConcern})) { diff --git a/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp b/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp index 62be2f3595a..eb013343c76 100644 --- a/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp +++ b/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp @@ -86,9 +86,10 @@ 
public: } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbName, + const std::string& dbName, const BSONObj& cmdObj) const override { - const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj)); + const NamespaceString nss( + CommandHelpers::parseNsCollectionRequired({boost::none, dbName}, cmdObj)); if (!AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(nss), ActionType::createIndex)) { diff --git a/src/mongo/s/configure_query_analyzer_cmd.idl b/src/mongo/s/configure_query_analyzer_cmd.idl index 6dd0cc194f3..c0564c23d01 100644 --- a/src/mongo/s/configure_query_analyzer_cmd.idl +++ b/src/mongo/s/configure_query_analyzer_cmd.idl @@ -56,19 +56,15 @@ structs: configureQueryAnalyzerResponse: description: "The response for the 'configureQueryAnalyzer' command." - strict: false - inline_chained_structs: true - chained_structs: - queryAnalyzerConfiguration: newConfiguration commands: configureQueryAnalyzer: description: "The command for setting the query analyzer configuration for a collection." command_name: configureQueryAnalyzer - strict: false + strict: true namespace: type api_version: "" type: namespacestring inline_chained_structs: true chained_structs: - queryAnalyzerConfiguration: configuration + queryAnalyzerConfiguration: queryAnalyzerConfiguration diff --git a/src/mongo/s/global_index_cache.h b/src/mongo/s/global_index_cache.h index 0323c3df145..eea2dff50b1 100644 --- a/src/mongo/s/global_index_cache.h +++ b/src/mongo/s/global_index_cache.h @@ -32,7 +32,6 @@ #include "mongo/db/namespace_string.h" #include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/index_version.h" -#include "mongo/util/read_through_cache.h" namespace mongo { @@ -150,8 +149,4 @@ private: // than the ones created before. 
uint64_t _epochDisambiguatingSequenceNum{0}; }; - -using GlobalIndexesCacheBase = - ReadThroughCache; - } // namespace mongo diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp index 807745b6caa..99dff9da625 100644 --- a/src/mongo/s/query/cluster_aggregate.cpp +++ b/src/mongo/s/query/cluster_aggregate.cpp @@ -329,7 +329,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx, sharded_agg_helpers::getExecutionNsRoutingInfo(opCtx, namespaces.executionNss); if (!executionNsRoutingInfoStatus.isOK()) { - uassert(CollectionUUIDMismatchInfo(request.getDbName(), + uassert(CollectionUUIDMismatchInfo(request.getDbName().toString(), *request.getCollectionUUID(), request.getNamespace().coll().toString(), boost::none), diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp index 1e007eed33a..6ccadf2e927 100644 --- a/src/mongo/s/query/cluster_find.cpp +++ b/src/mongo/s/query/cluster_find.cpp @@ -517,7 +517,7 @@ CursorId ClusterFind::runQuery(OperationContext* opCtx, for (size_t retries = 1; retries <= kMaxRetries; ++retries) { auto swCM = getCollectionRoutingInfoForTxnCmd(opCtx, query.nss()); if (swCM == ErrorCodes::NamespaceNotFound) { - uassert(CollectionUUIDMismatchInfo(query.nss().dbName(), + uassert(CollectionUUIDMismatchInfo(query.nss().db().toString(), *findCommand.getCollectionUUID(), query.nss().coll().toString(), boost::none), diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp index 21c8491135d..26f9a4d3488 100644 --- a/src/mongo/s/write_ops/batch_write_op.cpp +++ b/src/mongo/s/write_ops/batch_write_op.cpp @@ -232,7 +232,7 @@ void populateCollectionUUIDMismatch(OperationContext* opCtx, } if (*actualCollection) { - error->setStatus({CollectionUUIDMismatchInfo{info->dbName(), + error->setStatus({CollectionUUIDMismatchInfo{info->db(), info->collectionUUID(), info->expectedCollection(), **actualCollection}, diff --git a/src/mongo/shell/assert.js 
b/src/mongo/shell/assert.js index c42476a39f4..7749172f0ad 100644 --- a/src/mongo/shell/assert.js +++ b/src/mongo/shell/assert.js @@ -344,6 +344,14 @@ assert = (function() { assert.soon = function(func, msg, timeout, interval, {runHangAnalyzer = true} = {}) { _validateAssertionMessage(msg); + var msgPrefix = "assert.soon failed: " + func; + + if (msg) { + if (typeof (msg) != "function") { + msgPrefix = "assert.soon failed, msg"; + } + } + var start = new Date(); if (TestData && TestData.inEvergreen) { @@ -354,14 +362,6 @@ assert = (function() { interval = interval || 200; - var msgPrefix = "assert.soon failed (timeout " + timeout + "ms): " + func; - - if (msg) { - if (typeof (msg) != "function") { - msgPrefix = "assert.soon failed (timeout " + timeout + "ms), msg"; - } - } - while (1) { if (typeof (func) == "string") { if (eval(func)) diff --git a/src/mongo/shell/data_consistency_checker.js b/src/mongo/shell/data_consistency_checker.js index 0226610d674..4e47ea22977 100644 --- a/src/mongo/shell/data_consistency_checker.js +++ b/src/mongo/shell/data_consistency_checker.js @@ -104,7 +104,7 @@ var {DataConsistencyChecker} = (function() { } hasNext() { - return this.stashedDoc !== undefined || this.cursor.hasNext(); + return this.cursor.hasNext(); } peekNext() { diff --git a/src/mongo/transport/session_workflow.cpp b/src/mongo/transport/session_workflow.cpp index 5afe4adfd97..28eac1aaec6 100644 --- a/src/mongo/transport/session_workflow.cpp +++ b/src/mongo/transport/session_workflow.cpp @@ -69,7 +69,6 @@ namespace transport { namespace { MONGO_FAIL_POINT_DEFINE(doNotSetMoreToCome); MONGO_FAIL_POINT_DEFINE(beforeCompressingExhaustResponse); -MONGO_FAIL_POINT_DEFINE(alwaysLogSlowSessionWorkflow); /** * Given a request and its already generated response, checks for exhaust flags. 
If exhaust is @@ -149,148 +148,6 @@ bool killExhaust(const Message& in, ServiceEntryPoint* sep, Client* client) { } } // namespace - -/** - * Acts as a split timer which captures times elapsed at various points throughout a single - * SessionWorkflow loop. The SessionWorkflow loop itself is expected to (1) construct this object - * when timing should begin, and (2) call this object's `notifySplit` function at appropriate times - * throughout the workflow. - * - * TODO(SERVER-69831): On destruction, dump stats as appropriate. - */ -class SessionWorkflowMetrics { - /** - * NOTE: when updating these, ensure: - * - These are all contiguous. - * - NumEntries is the highest constant. - * - The public constexprs are up to date. - * - The ranges in logSlowLoop are still correct. - */ - using Started_T = std::integral_constant; - using SourcedWork_T = std::integral_constant; - using ProcessedWork_T = std::integral_constant; - using SentResponse_T = std::integral_constant; - using Done_T = std::integral_constant; - using NumEntries_T = std::integral_constant; - static constexpr NumEntries_T NumEntries{}; - -public: - /** - * These constants act as tags for moments in a single SessionWorkflow loop. - */ - static constexpr Started_T Started{}; - static constexpr SourcedWork_T SourcedWork{}; - static constexpr ProcessedWork_T ProcessedWork{}; - static constexpr SentResponse_T SentResponse{}; - static constexpr Done_T Done{}; - - template - struct SplitInRange { - static constexpr bool value = Split_T::value >= Started && Split_T::value < NumEntries; - }; - - SessionWorkflowMetrics() { - _splits[Started] = Microseconds{0}; - } - - SessionWorkflowMetrics(SessionWorkflowMetrics&& other) { - *this = std::move(other); - } - SessionWorkflowMetrics& operator=(SessionWorkflowMetrics&& other) { - _isFinalized = other._isFinalized; - _timer = std::move(other._timer); - _splits = std::move(other._splits); - - // The moved-from object should avoid extraneous logging. 
- other._isFinalized = true; - - return *this; - } - - ~SessionWorkflowMetrics() { - finalize(); - } - - /** - * Captures the elapsed time and associates it with `split`. A second call with the same `split` - * will overwrite the previous. It is expected that this gets called for all splits other than - * Start and Done. - */ - template ::value, int> = 0> - void notifySplit(Split_T split) { - _splits[split] = _timer.elapsed(); - } - - /** - * If not already finalized, captures the elapsed time for the `Done` Split and outputs metrics - * as a log if the criteria for logging is met. Calling `finalize` explicitly is not required - * because it is invoked by the destructor, however an early call can be done if this object's - * destruction needs to be defered for any reason. - */ - void finalize() { - if (_isFinalized) - return; - _isFinalized = true; - notifySplit(Done); - - if (MONGO_unlikely(alwaysLogSlowSessionWorkflow.shouldFail())) { - logSlowLoop(); - } - } - -private: - bool _isFinalized{false}; - Timer _timer{}; - std::array, NumEntries> _splits{}; - - /** - * Returns the time elapsed between the two splits corresponding to `startIdx` and `endIdx`. - * The split time for `startIdx` is assumed to have happened before the split at `endIdx`. - * Both `startIdx` and `endIdx` are assumed to have had captured times. If not, an optional with - * no value will be returned. - */ - boost::optional microsBetween(size_t startIdx, size_t endIdx) const { - auto atEnd = _splits[endIdx]; - auto atStart = _splits[startIdx]; - if (!atStart || !atEnd) - return {}; - return *atEnd - *atStart; - } - - /** - * Appends an attribute to `attr` corresponding to a range. Returns whether a negative range was - * encountered. 
- */ - template - bool addAttr(const char (&name)[N], - size_t startIdx, - size_t endIdx, - logv2::DynamicAttributes& attr) { - if (auto optTime = microsBetween(startIdx, endIdx)) { - attr.add(name, duration_cast(*optTime)); - return *optTime < Microseconds{0}; - } - return false; - } - - void logSlowLoop() { - bool neg = false; - logv2::DynamicAttributes attr; - - neg |= addAttr("totalElapsed", Started, Done, attr); - neg |= addAttr("activeElapsed", SourcedWork, Done, attr); - neg |= addAttr("sourceWorkElapsed", Started, SourcedWork, attr); - neg |= addAttr("processWorkElapsed", SourcedWork, ProcessedWork, attr); - neg |= addAttr("sendResponseElapsed", ProcessedWork, SentResponse, attr); - neg |= addAttr("finalizeElapsed", SentResponse, Done, attr); - if (neg) { - attr.add("note", "Negative time range found. This indicates something went wrong."); - } - - LOGV2(6983000, "Slow SessionWorkflow loop", attr); - } -}; - class SessionWorkflow::Impl { public: Impl(SessionWorkflow* workflow, ServiceContext::UniqueClient client) @@ -400,8 +257,6 @@ private: std::unique_ptr _work; std::unique_ptr _nextWork; /**< created by exhaust responses */ - - boost::optional _metrics{}; }; class SessionWorkflow::Impl::WorkItem { @@ -651,26 +506,21 @@ void SessionWorkflow::Impl::startNewLoop(const Status& executorStatus) { return; } - _metrics = SessionWorkflowMetrics(); - makeReadyFutureWith([this] { if (_nextWork) { _work = std::move(_nextWork); } else { receiveMessage(); } - _metrics->notifySplit(SessionWorkflowMetrics::SourcedWork); + return processMessage(); }) .then([this] { - _metrics->notifySplit(SessionWorkflowMetrics::ProcessedWork); if (_work->hasOut()) { sendMessage(); - _metrics->notifySplit(SessionWorkflowMetrics::SentResponse); } }) .getAsync([this, anchor = shared_from_this()](Status status) { - _metrics = {}; scheduleNewLoop(std::move(status)); }); } diff --git a/src/mongo/unittest/golden_test_base.cpp b/src/mongo/unittest/golden_test_base.cpp index 
e8b6a9b298c..67b1ee62077 100644 --- a/src/mongo/unittest/golden_test_base.cpp +++ b/src/mongo/unittest/golden_test_base.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data index bf39a06c7ff..4d1d4ed28e9 100644 --- a/src/third_party/wiredtiger/import.data +++ b/src/third_party/wiredtiger/import.data @@ -2,5 +2,5 @@ "vendor": "wiredtiger", "github": "wiredtiger/wiredtiger.git", "branch": "mongodb-master", - "commit": "265abebff936adbd7b7cdeab2a2989e034d1b702" + "commit": "7e38deff7bff0f2cdad1b8760b123bcf0a476456" } diff --git a/src/third_party/wiredtiger/src/block/block_compact.c b/src/third_party/wiredtiger/src/block/block_compact.c index 098832cbfb6..883eb39b971 100644 --- a/src/third_party/wiredtiger/src/block/block_compact.c +++ b/src/third_party/wiredtiger/src/block/block_compact.c @@ -87,9 +87,10 @@ __wt_block_compact_progress(WT_SESSION_IMPL *session, WT_BLOCK *block, u_int *ms if (time_diff / WT_PROGRESS_MSG_PERIOD > *msg_countp) { ++*msg_countp; __wt_verbose_debug(session, WT_VERB_COMPACT_PROGRESS, - " compacting %s for %" PRIu64 " seconds; reviewed %" PRIu64 " pages, rewritten %" PRIu64 - " pages", - block->name, time_diff, block->compact_pages_reviewed, block->compact_pages_rewritten); + " compacting %s for %" PRIu64 " seconds; reviewed %" PRIu64 " pages, skipped %" PRIu64 + " pages, rewritten %" PRIu64 " pages", + block->name, time_diff, block->compact_pages_reviewed, block->compact_pages_skipped, + block->compact_pages_rewritten); } } /* @@ -156,8 +157,10 @@ __wt_block_compact_skip(WT_SESSION_IMPL *session, WT_BLOCK *block, bool *skipp) } __wt_verbose_debug(session, WT_VERB_COMPACT, - "%s: total reviewed %" PRIu64 " pages, total rewritten %" PRIu64 " pages", block->name, - block->compact_pages_reviewed, block->compact_pages_rewritten); + "%s: total reviewed %" PRIu64 " pages, total skipped %" PRIu64 " pages, total wrote %" PRIu64 
+ " pages", + block->name, block->compact_pages_reviewed, block->compact_pages_skipped, + block->compact_pages_rewritten); __wt_verbose_debug(session, WT_VERB_COMPACT, "%s: %" PRIuMAX "MB (%" PRIuMAX ") available space in the first 80%% of the file", block->name, (uintmax_t)avail_eighty / WT_MEGABYTE, (uintmax_t)avail_eighty); diff --git a/src/third_party/wiredtiger/src/include/session.h b/src/third_party/wiredtiger/src/include/session.h index 3bdb0804876..5da6f0368de 100644 --- a/src/third_party/wiredtiger/src/include/session.h +++ b/src/third_party/wiredtiger/src/include/session.h @@ -304,6 +304,4 @@ struct __wt_session_impl { }; /* Consider moving this to session_inline.h if it ever appears. */ -#define WT_READING_CHECKPOINT(s) \ - ((s)->dhandle != NULL && F_ISSET((s)->dhandle, WT_DHANDLE_OPEN) && \ - WT_DHANDLE_IS_CHECKPOINT((s)->dhandle)) +#define WT_READING_CHECKPOINT(s) ((s)->dhandle != NULL && WT_DHANDLE_IS_CHECKPOINT((s)->dhandle)) diff --git a/src/third_party/wiredtiger/test/evergreen.yml b/src/third_party/wiredtiger/test/evergreen.yml index e1c8d403311..453c187615b 100755 --- a/src/third_party/wiredtiger/test/evergreen.yml +++ b/src/third_party/wiredtiger/test/evergreen.yml @@ -1918,26 +1918,6 @@ tasks: vars: unit_test_args: --hook tiered - - name: unit-test-hook-tiered-timestamp - tags: ["python"] - depends_on: - - name: compile - commands: - - func: "fetch artifacts" - - func: "unit test" - vars: - unit_test_args: --hook tiered --hook timestamp - - - name: unit-test-hook-timestamp - tags: ["python"] - depends_on: - - name: compile - commands: - - func: "fetch artifacts" - - func: "unit test" - vars: - unit_test_args: --hook timestamp - # Break out Python unit tests into multiple buckets/tasks. We have a fixed number of buckets, # and we use the -b option of the test/suite/run.py script to split up the tests. 
@@ -3995,8 +3975,6 @@ buildvariants: - name: unit-test-zstd - name: unit-test-random-seed - name: unit-test-hook-tiered - - name: unit-test-hook-tiered-timestamp - - name: unit-test-hook-timestamp - name: spinlock-gcc-test - name: spinlock-pthread-adaptive-test - name: compile-wtperf diff --git a/src/third_party/wiredtiger/test/suite/hook_tiered.py b/src/third_party/wiredtiger/test/suite/hook_tiered.py index 9fcd6cfa303..1d64644e549 100755 --- a/src/third_party/wiredtiger/test/suite/hook_tiered.py +++ b/src/third_party/wiredtiger/test/suite/hook_tiered.py @@ -55,7 +55,7 @@ # from __future__ import print_function -import os, sys, unittest, wthooks +import os, sys, unittest, wthooks, wttimestamp from wttest import WiredTigerTestCase # These are the hook functions that are run when particular APIs are called. @@ -163,6 +163,13 @@ def connection_close_replace(orig_connection_close, connection_self, config): ret = orig_connection_close(connection_self, config) return ret +# Called to replace Connection.open_session +def connection_open_session_replace(orig_connection_open_session, connection_self, config): + ret_session = orig_connection_open_session(connection_self, config) + ret_session._connection = connection_self + ret_session._has_transaction = False + return ret_session + # Called to replace Session.checkpoint. # We add a call to flush_tier after the checkpoint to make sure we are exercising tiered # functionality. 
@@ -177,6 +184,24 @@ def session_checkpoint_replace(orig_session_checkpoint, session_self, config): ' Calling flush_tier() after checkpoint') return session_self.flush_tier(None) +# Called to replace Session.begin_transaction +def session_begin_transaction_replace(orig_session_begin_transaction, session_self, config): + ret = orig_session_begin_transaction(session_self, config) + session_self._has_transaction = True + return ret + +# Called to replace Session.commit_transaction +def session_commit_transaction_replace(orig_session_commit_transaction, session_self, config): + ret = orig_session_commit_transaction(session_self, config) + session_self._has_transaction = False + return ret + +# Called to replace Session.rollback_transaction +def session_rollback_transaction_replace(orig_session_rollback_transaction, session_self, config): + ret = orig_session_rollback_transaction(session_self, config) + session_self._has_transaction = False + return ret + # Called to replace Session.compact def session_compact_replace(orig_session_compact, session_self, uri, config): # Compact isn't implemented for tiered tables. Only call it if this can't be the uri @@ -217,7 +242,9 @@ def session_open_cursor_replace(orig_session_open_cursor, session_self, uri, dup if uri != None and uri.startswith("backup:"): testcase = WiredTigerTestCase.currentTestCase() testcase.skipTest("backup on tiered tables not yet implemented") - return orig_session_open_cursor(session_self, uri, dupcursor, config) + ret_cursor = orig_session_open_cursor(session_self, uri, dupcursor, config) + ret_cursor._session = session_self + return ret_cursor # Called to replace Session.rename def session_rename_replace(orig_session_rename, session_self, uri, newuri, config): @@ -262,6 +289,14 @@ class TieredHookCreator(wthooks.WiredTigerHookCreator): # Override some platform APIs self.platform_api = TieredPlatformAPI() + # This hook plays with timestamps, indirectly by modifying the behavior of the *DataSet classes. 
+ # Here we declare our use of timestamp code, so that tests that have their own notion of + # timestamps can be skipped when running with this hook. + def uses(self, use_list): + if "timestamp" in use_list: + return True + return False + # Is this test one we should skip? def skip_test(self, test): # Skip any test that contains one of these strings as a substring @@ -298,6 +333,7 @@ class TieredHookCreator(wthooks.WiredTigerHookCreator): # This group fail within Python for various, sometimes unknown, reasons. "test_bug018.test_bug018", "test_checkpoint.test_checkpoint", + "test_checkpoint_target.test_checkpoint_target", "test_checkpoint_snapshot02.test_checkpoint_snapshot_with_txnid_and_timestamp", "test_compat05.test_compat05", "test_config05.test_too_many_sessions", @@ -365,6 +401,22 @@ class TieredHookCreator(wthooks.WiredTigerHookCreator): self.Connection['close'] = (wthooks.HOOK_REPLACE, lambda s, config=None: connection_close_replace(orig_connection_close, s, config)) + orig_connection_open_session = self.Connection['open_session'] + self.Connection['open_session'] = (wthooks.HOOK_REPLACE, lambda s, config=None: + connection_open_session_replace(orig_connection_open_session, s, config)) + + orig_session_begin_transaction = self.Session['begin_transaction'] + self.Session['begin_transaction'] = (wthooks.HOOK_REPLACE, lambda s, config=None: + session_begin_transaction_replace(orig_session_begin_transaction, s, config)) + + orig_session_commit_transaction = self.Session['commit_transaction'] + self.Session['commit_transaction'] = (wthooks.HOOK_REPLACE, lambda s, config=None: + session_commit_transaction_replace(orig_session_commit_transaction, s, config)) + + orig_session_rollback_transaction = self.Session['rollback_transaction'] + self.Session['rollback_transaction'] = (wthooks.HOOK_REPLACE, lambda s, config=None: + session_rollback_transaction_replace(orig_session_rollback_transaction, s, config)) + orig_session_compact = self.Session['compact'] 
self.Session['compact'] = (wthooks.HOOK_REPLACE, lambda s, uri, config=None: session_compact_replace(orig_session_compact, s, uri, config)) @@ -393,6 +445,12 @@ class TieredHookCreator(wthooks.WiredTigerHookCreator): # Override some platform APIs for this hook. class TieredPlatformAPI(wthooks.WiredTigerHookPlatformAPI): + def setUp(self): + self._timestamp = wttimestamp.WiredTigerTimeStamp() + + def tearDown(self): + pass + def tableExists(self, name): for i in range(1, 9): tablename = name + "-000000000{}.wtobj".format(i) @@ -406,6 +464,10 @@ class TieredPlatformAPI(wthooks.WiredTigerHookPlatformAPI): else: return wthooks.DefaultPlatformAPI.initialFileName(uri) + # By default, there is no timestamping by the data set classes. + def getTimestamp(self): + return self._timestamp + # Every hook file must have a top level initialize function, # returning a list of WiredTigerHook objects. diff --git a/src/third_party/wiredtiger/test/suite/hook_timestamp.py b/src/third_party/wiredtiger/test/suite/hook_timestamp.py deleted file mode 100644 index 7322b2fc19e..00000000000 --- a/src/third_party/wiredtiger/test/suite/hook_timestamp.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/env python -# -# Public Domain 2014-present MongoDB, Inc. -# Public Domain 2008-2014 WiredTiger, Inc. -# -# This is free and unencumbered software released into the public domain. -# -# Anyone is free to copy, modify, publish, use, compile, sell, or -# distribute this software, either in source code form or as a compiled -# binary, for any purpose, commercial or non-commercial, and by any -# means. -# -# In jurisdictions that recognize copyright laws, the author or authors -# of this software dedicate any and all copyright interest in the -# software to the public domain. We make this dedication for the benefit -# of the public at large and to the detriment of our heirs and -# successors. 
We intend this dedication to be an overt act of -# relinquishment in perpetuity of all present and future rights to this -# software under copyright law. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# [TEST_TAGS] -# ignored_file -# [END_TAGS] - -# hook_timestamp.py -# -# Insert the use of timestamps into data sets. -# -# These hooks have three functions. The primary one is setting up the platform API to return -# a "timestamper". The dataset package uses this platform API, and so will run with timestamps -# when this hook is enabled. The timestamper provides a timestamping cursor that "knows" to wrap -# timestamped transactions around certain operations, like insert. -# -# Secondly, we set hooks on the transaction APIs so we know when a transaction has been started or -# finished by the test application. If the application already a transaction in progress, -# the timestamping cursor should not try to open a transaction, but can place a timestamp on the -# current transaction. -# -# To run, for example, the cursor tests with these hooks enabled: -# ../test/suite/run.py --hooks timestamp cursor -# -from __future__ import print_function - -import os, sys, unittest, wthooks, wttimestamp -from wttest import WiredTigerTestCase - -# These are the hook functions that are run when particular APIs are called. 
- -# Called to replace Session.begin_transaction -def session_begin_transaction_replace(orig_session_begin_transaction, session_self, config): - ret = orig_session_begin_transaction(session_self, config) - session_self._has_transaction = True - return ret - -# Called to replace Session.commit_transaction -def session_commit_transaction_replace(orig_session_commit_transaction, session_self, config): - ret = orig_session_commit_transaction(session_self, config) - session_self._has_transaction = False - return ret - -# Called to replace Session.rollback_transaction -def session_rollback_transaction_replace(orig_session_rollback_transaction, session_self, config): - ret = orig_session_rollback_transaction(session_self, config) - session_self._has_transaction = False - return ret - -def make_dataset_names(): - import wtdataset - names = ['wtdataset'] - #g = globals(sys.modules['wtdataset']) - g = sys.modules['wtdataset'].__dict__ - for name in g: - if name.endswith('DataSet'): - names.append(name) - return names - -# Every hook file must have one or more classes descended from WiredTigerHook -# This is where the hook functions are 'hooked' to API methods. -class TimestampHookCreator(wthooks.WiredTigerHookCreator): - def __init__(self, arg=0): - # Caller can specify an optional command-line argument. We're not using it - # now, but this is where it would show up. - - # Override some platform APIs - self.platform_api = TimestampPlatformAPI() - - # This hook plays with timestamps, indirectly by modifying the behavior of the *DataSet classes. - # Here we declare our use of timestamp code, so that tests that have their own notion of - # timestamps can be skipped when running with this hook. 
- def uses(self, use_list): - if "timestamp" in use_list: - return True - return False - - dataset_names = make_dataset_names() - - # We skip tests that don't use datasets - def skip_test(self, test, known_skip): - import importlib - testname = str(test) - #print('CHECK: {}'.format(testname)) - modname = testname.split('.')[0] - if modname in known_skip: - return known_skip[modname] - g = sys.modules[modname].__dict__ - uses_dataset = False - for dsname in self.dataset_names: - if dsname in g: - uses_dataset = True - break - #print('CHECK: {}: {}'.format(test,uses_dataset)) - skip = not uses_dataset - known_skip[modname] = skip - return skip - - # Remove tests that won't work on timestamp cursors - def filter_tests(self, tests): - new_tests = unittest.TestSuite() - known_skip = dict() - new_tests.addTests([t for t in tests if not self.skip_test(t, known_skip)]) - return new_tests - - def get_platform_api(self): - return self.platform_api - - def setup_hooks(self): - orig_session_begin_transaction = self.Session['begin_transaction'] - self.Session['begin_transaction'] = (wthooks.HOOK_REPLACE, lambda s, config=None: - session_begin_transaction_replace(orig_session_begin_transaction, s, config)) - - orig_session_commit_transaction = self.Session['commit_transaction'] - self.Session['commit_transaction'] = (wthooks.HOOK_REPLACE, lambda s, config=None: - session_commit_transaction_replace(orig_session_commit_transaction, s, config)) - - orig_session_rollback_transaction = self.Session['rollback_transaction'] - self.Session['rollback_transaction'] = (wthooks.HOOK_REPLACE, lambda s, config=None: - session_rollback_transaction_replace(orig_session_rollback_transaction, s, config)) - -# Override some platform APIs for this hook. 
-class TimestampPlatformAPI(wthooks.WiredTigerHookPlatformAPI): - def setUp(self): - self._timestamp = wttimestamp.WiredTigerTimeStamp() - - def tearDown(self): - pass - - # Return a timestamping implementation, it will be used by the data set classes. - def getTimestamp(self): - return self._timestamp - - -# Every hook file must have a top level initialize function, -# returning a list of WiredTigerHook objects. -def initialize(arg): - return [TimestampHookCreator(arg)] diff --git a/src/third_party/wiredtiger/test/suite/suite_subprocess.py b/src/third_party/wiredtiger/test/suite/suite_subprocess.py index 507c647757f..2aab7e4f186 100755 --- a/src/third_party/wiredtiger/test/suite/suite_subprocess.py +++ b/src/third_party/wiredtiger/test/suite/suite_subprocess.py @@ -30,7 +30,6 @@ from __future__ import print_function import os, re, subprocess, sys from run import wt_builddir from wttest import WiredTigerTestCase -import wttest # suite_subprocess.py # Run a subprocess within the test suite @@ -225,18 +224,15 @@ class suite_subprocess: return [ returncode, new_home_dir ] # Run the wt utility. - - # FIXME-WT-9808: - # The tiered hook silently interjects tiered configuration and extensions, - # these are not yet dealt with when running the external 'wt' process. - @wttest.skip_for_hook("tiered", "runWt cannot add needed extensions") def runWt(self, args, infilename=None, outfilename=None, errfilename=None, closeconn=True, reopensession=True, failure=False): - # FIXME-WT-9809: - if 'timestamp' in self.hook_names and args[0] == 'load': - self.skipTest("the load utility cannot be run when timestamps are already set") + # FIXME-WT-9808: + # The tiered hook silently interjects tiered configuration and extensions, + # these are not yet dealt with when running the external 'wt' process. 
+ if 'tiered' in self.hook_names: + self.skipTest("runWt is not yet supported with tiering") # Close the connection to guarantee everything is flushed, and that # we can open it from another process. diff --git a/src/third_party/wiredtiger/test/suite/test_checkpoint01.py b/src/third_party/wiredtiger/test/suite/test_checkpoint01.py index 6f3940f47cb..e8c08cbd48e 100755 --- a/src/third_party/wiredtiger/test/suite/test_checkpoint01.py +++ b/src/third_party/wiredtiger/test/suite/test_checkpoint01.py @@ -209,18 +209,15 @@ class test_checkpoint_target(wttest.WiredTigerTestCase): ]) def update(self, uri, ds, value): - cursor = ds.open_cursor(uri, None, "overwrite") + cursor = self.session.open_cursor(uri, None, "overwrite") cursor[ds.key(10)] = value cursor.close() def check(self, uri, ds, value): - cursor = ds.open_cursor(uri, None, "checkpoint=checkpoint-1") + cursor = self.session.open_cursor(uri, None, "checkpoint=checkpoint-1") self.assertEquals(cursor[ds.key(10)], value) cursor.close() - # FIXME-WT-9902 - @wttest.skip_for_hook("tiered", "strange interaction with tiered and named checkpoints using target") - @wttest.skip_for_hook("timestamp", "strange interaction with timestamps and named checkpoints using target") def test_checkpoint_target(self): # Create 3 objects, change one record to an easily recognizable string. uri = self.uri + '1' diff --git a/src/third_party/wiredtiger/test/suite/test_compact01.py b/src/third_party/wiredtiger/test/suite/test_compact01.py index 4193a9e2dea..9790816805c 100755 --- a/src/third_party/wiredtiger/test/suite/test_compact01.py +++ b/src/third_party/wiredtiger/test/suite/test_compact01.py @@ -73,7 +73,6 @@ class test_compact(wttest.WiredTigerTestCase, suite_subprocess): return statDict # Test compaction. 
- @wttest.skip_for_hook("timestamp", "removing timestamped items will not free space") def test_compact(self): # Populate an object uri = self.type + self.name @@ -89,11 +88,11 @@ class test_compact(wttest.WiredTigerTestCase, suite_subprocess): stat_cursor.close() # Remove most of the object. - c1 = ds.open_cursor(uri, None) + c1 = self.session.open_cursor(uri, None) c1.set_key(ds.key(5)) - c2 = ds.open_cursor(uri, None) + c2 = self.session.open_cursor(uri, None) c2.set_key(ds.key(self.nentries - 5)) - ds.truncate(None, c1, c2, None) + self.session.truncate(None, c1, c2, None) c1.close() c2.close() diff --git a/src/third_party/wiredtiger/test/suite/test_cursor06.py b/src/third_party/wiredtiger/test/suite/test_cursor06.py index 03eafeed145..88364288f83 100755 --- a/src/third_party/wiredtiger/test/suite/test_cursor06.py +++ b/src/third_party/wiredtiger/test/suite/test_cursor06.py @@ -62,7 +62,7 @@ class test_cursor06(wttest.WiredTigerTestCase): cursor.set_key(self.ds.key(10)) cursor.set_value(self.ds.value(10)) - @wttest.skip_for_hook("timestamp", "crashes on final connection close") # FIXME-WT-9809 + @wttest.skip_for_hook("tiered", "crashes on final connection close") # FIXME-WT-9809 def test_reconfigure_overwrite(self): uri = self.type + self.name for open_config in (None, "overwrite=0", "overwrite=1"): diff --git a/src/third_party/wiredtiger/test/suite/test_cursor12.py b/src/third_party/wiredtiger/test/suite/test_cursor12.py index fb6187137e6..614bf5c713c 100755 --- a/src/third_party/wiredtiger/test/suite/test_cursor12.py +++ b/src/third_party/wiredtiger/test/suite/test_cursor12.py @@ -339,7 +339,7 @@ class test_cursor12(wttest.WiredTigerTestCase): self.modify_confirm(ds, False) # Check that we can perform a large number of modifications to a record. 
- @wttest.skip_for_hook("timestamp", "crashes on commit_transaction or connection close") # FIXME-WT-9809 + @wttest.skip_for_hook("tiered", "crashes on commit_transaction or connection close") # FIXME-WT-9809 def test_modify_many(self): ds = SimpleDataSet(self, self.uri, 20, key_format=self.keyfmt, value_format=self.valuefmt) diff --git a/src/third_party/wiredtiger/test/suite/test_cursor17.py b/src/third_party/wiredtiger/test/suite/test_cursor17.py index dbdddab1f6b..84cfc4d7c27 100755 --- a/src/third_party/wiredtiger/test/suite/test_cursor17.py +++ b/src/third_party/wiredtiger/test/suite/test_cursor17.py @@ -60,7 +60,7 @@ class test_cursor17(wttest.WiredTigerTestCase): self.ds = self.dataset(self, self.type + self.tablename, rownum, key_format=self.keyformat) self.ds.populate() - @wttest.skip_for_hook("timestamp", "fails assertion 99") # FIXME-WT-9809 + @wttest.skip_for_hook("tiered", "fails assertion 99") # FIXME-WT-9809 def test_globally_deleted_key(self): self.populate(100) diff --git a/src/third_party/wiredtiger/test/suite/test_flcs01.py b/src/third_party/wiredtiger/test/suite/test_flcs01.py index 44b2bbf551c..fae21b7e02c 100755 --- a/src/third_party/wiredtiger/test/suite/test_flcs01.py +++ b/src/third_party/wiredtiger/test/suite/test_flcs01.py @@ -105,7 +105,7 @@ class test_flcs01(wttest.WiredTigerTestCase): self.check_prev(cursor, k, 0) self.session.rollback_transaction() - @wttest.skip_for_hook("timestamp", "crashes in evict function, during cursor reset") # FIXME-WT-9809 + @wttest.skip_for_hook("tiered", "crashes in evict function, during cursor reset") # FIXME-WT-9809 def test_flcs(self): uri = "table:test_flcs01" nrows = 44 diff --git a/src/third_party/wiredtiger/test/suite/test_flcs05.py b/src/third_party/wiredtiger/test/suite/test_flcs05.py index 8bb60bab6f4..cff43208d7d 100755 --- a/src/third_party/wiredtiger/test/suite/test_flcs05.py +++ b/src/third_party/wiredtiger/test/suite/test_flcs05.py @@ -56,7 +56,7 @@ class 
test_flcs05(wttest.WiredTigerTestCase): self.session.rollback_transaction() evict_cursor.close() - @wttest.skip_for_hook("timestamp", "fails at begin_transaction") # FIXME-WT-9809 + @wttest.skip_for_hook("tiered", "fails at begin_transaction") # FIXME-WT-9809 def test_flcs(self): uri = "table:test_flcs05" nrows = 44 diff --git a/src/third_party/wiredtiger/test/suite/test_inmem01.py b/src/third_party/wiredtiger/test/suite/test_inmem01.py index 4764f526030..14263a1648e 100644 --- a/src/third_party/wiredtiger/test/suite/test_inmem01.py +++ b/src/third_party/wiredtiger/test/suite/test_inmem01.py @@ -63,7 +63,7 @@ class test_inmem01(wttest.WiredTigerTestCase): # Figure out the last key we successfully inserted, and check all # previous inserts are still there. - cursor = ds.open_cursor(self.uri, None) + cursor = self.session.open_cursor(self.uri, None) cursor.prev() last_key = int(cursor.get_key()) ds = SimpleDataSet(self, self.uri, last_key, key_format=self.keyfmt, @@ -81,15 +81,13 @@ class test_inmem01(wttest.WiredTigerTestCase): # Now that the database contains as much data as will fit into # the configured cache, verify removes succeed. - cursor = ds.open_cursor(self.uri, None) + cursor = self.session.open_cursor(self.uri, None) for i in range(1, 100): cursor.set_key(ds.key(i)) self.assertEqual(cursor.remove(), 0) # Run queries after adding, removing and re-inserting data. # Try out keeping a cursor open while adding new data. 
- - @wttest.skip_for_hook("timestamp", "removing timestamped items will not free space") def test_insert_over_delete_replace(self): msg = '/WT_CACHE_FULL.*/' ds = SimpleDataSet(self, self.uri, 10000000, key_format=self.keyfmt, @@ -97,7 +95,7 @@ class test_inmem01(wttest.WiredTigerTestCase): self.assertRaisesHavingMessage(wiredtiger.WiredTigerError, ds.populate, msg) - cursor = ds.open_cursor(self.uri, None) + cursor = self.session.open_cursor(self.uri, None) cursor.prev() last_key = int(cursor.get_key()) @@ -143,7 +141,7 @@ class test_inmem01(wttest.WiredTigerTestCase): # Now that the database contains as much data as will fit into # the configured cache, verify removes succeed. - cursor = ds.open_cursor(self.uri, None) + cursor = self.session.open_cursor(self.uri, None) for i in range(1, last_key // 4, 1): cursor.set_key(ds.key(i)) self.assertEqual(cursor.remove(), 0) @@ -178,7 +176,7 @@ class test_inmem01(wttest.WiredTigerTestCase): ds = SimpleDataSet(self, self.uri, 0, key_format=self.keyfmt, value_format=self.valuefmt, config=self.table_config) ds.populate() - cursor = ds.open_cursor(self.uri, None) + cursor = self.session.open_cursor(self.uri, None) run = 0 start, last_key = -1000, 0 diff --git a/src/third_party/wiredtiger/test/suite/test_stat01.py b/src/third_party/wiredtiger/test/suite/test_stat01.py index 5431a6f892d..6a4ad5423ad 100755 --- a/src/third_party/wiredtiger/test/suite/test_stat01.py +++ b/src/third_party/wiredtiger/test/suite/test_stat01.py @@ -142,7 +142,6 @@ class test_stat01(wttest.WiredTigerTestCase): cursor.close() # Test simple per-checkpoint statistics. 
- @wttest.skip_for_hook("timestamp", "__txn_visiable_all_id assertion hit") # FIXME-WT-9809 def test_checkpoint_stats(self): ds = SimpleDataSet(self, self.uri, self.nentries, config=self.config, key_format=self.keyfmt) diff --git a/src/third_party/wiredtiger/test/suite/test_stat05.py b/src/third_party/wiredtiger/test/suite/test_stat05.py index a93b309e585..6de398c0524 100644 --- a/src/third_party/wiredtiger/test/suite/test_stat05.py +++ b/src/third_party/wiredtiger/test/suite/test_stat05.py @@ -92,7 +92,7 @@ class test_stat_cursor_config(wttest.WiredTigerTestCase): self, self.uri, 100, key_format=key_format, value_format=value_format, config=self.cfg) ds.populate() self.openAndWalkStatCursor() - cursor = ds.open_cursor(self.uri, None) + cursor = self.session.open_cursor(self.uri, None) for i in range(100, 40000 + 1): if i % 100 == 0: self.openAndWalkStatCursor() diff --git a/src/third_party/wiredtiger/test/suite/wtdataset.py b/src/third_party/wiredtiger/test/suite/wtdataset.py index c6c399625ec..afda18043b6 100755 --- a/src/third_party/wiredtiger/test/suite/wtdataset.py +++ b/src/third_party/wiredtiger/test/suite/wtdataset.py @@ -58,7 +58,7 @@ class BaseDataSet(object): if session == None: session = self.testcase.session c = session.open_cursor(uri, None, config) - return wttimestamp.TimestampedCursor(session, c, self.timestamp, self.testcase) + return wttimestamp.TimestampedCursor(c, self.timestamp, self.testcase) def truncate(self, uri, c1, c2, config, session=None): if session == None: diff --git a/src/third_party/wiredtiger/test/suite/wthooks.py b/src/third_party/wiredtiger/test/suite/wthooks.py index 5f52a1def1f..2ee0e26c499 100755 --- a/src/third_party/wiredtiger/test/suite/wthooks.py +++ b/src/third_party/wiredtiger/test/suite/wthooks.py @@ -143,7 +143,7 @@ def hooked_function(self, orig_func, hook_info_name, *args): class WiredTigerHookManager(object): def __init__(self, hooknames = []): self.hooks = [] - self.platform_apis = [] + self.platform_api = None 
names_seen = [] for name in hooknames: # The hooks are indicated as "somename=arg" or simply "somename". @@ -172,8 +172,16 @@ class WiredTigerHookManager(object): for hook in self.hooks: hook.setup_hooks() api = hook.get_platform_api() # can return None - self.platform_apis.append(api) - self.platform_apis.append(DefaultPlatformAPI()) + if api: + # We currently don't allow multiple platforms to create their own API, + # but this could be relaxed. Imagine that hooks implement subsets of the + # API. We could create an ordered list, and try each platform_api in turn. + if self.platform_api: + raise Exception('Running multiple hooks, each with their own platform API, ' + + 'is not implemented') + self.platform_api = api + if self.platform_api == None: + self.platform_api = DefaultPlatformAPI() def add_hook(self, clazz, method_name, hook_type, hook_func): if not hasattr(clazz, method_name): @@ -236,7 +244,7 @@ class WiredTigerHookManager(object): return self.hook_names def get_platform_api(self): - return MultiPlatformAPI(self.platform_apis) + return self.platform_api # Returns a list of hook names that use something on the list def hooks_using(self, use_list): @@ -289,28 +297,39 @@ class WiredTigerHookCreator(ABC): def uses(self, use_list): return False -class WiredTigerHookPlatformAPI(object): +class WiredTigerHookPlatformAPI(ABC): + @abstractmethod def setUp(self): """Called at the beginning of a test case""" pass + @abstractmethod def tearDown(self): """Called at the termination of a test case""" pass + @abstractmethod def tableExists(self, name): """Return boolean if local files exist for the table with the given base name""" - raise NotImplementedError('tableExists method not implemented') + pass + @abstractmethod def initialFileName(self, uri): """The first local backing file name created for this URI.""" - raise NotImplementedError('initialFileName method not implemented') + pass + @abstractmethod def getTimestamp(self): """The timestamp generator for this test 
case.""" - raise NotImplementedError('getTimestamp method not implemented') + pass class DefaultPlatformAPI(WiredTigerHookPlatformAPI): + def setUp(self): + pass + + def tearDown(self): + pass + def tableExists(self, name): tablename = name + ".wt" return os.path.exists(tablename) @@ -326,44 +345,3 @@ class DefaultPlatformAPI(WiredTigerHookPlatformAPI): # By default, there is no automatic timestamping by test infrastructure classes. def getTimestamp(self): return None - -class MultiPlatformAPI(WiredTigerHookPlatformAPI): - def __init__(self, platform_apis): - self.apis = platform_apis - - def setUp(self): - """Called at the beginning of a test case""" - for api in self.apis: - api.setUp() - - def tearDown(self): - """Called at the termination of a test case""" - for api in self.apis: - api.tearDown() - - def tableExists(self, name): - """Return boolean if local files exist for the table with the given base name""" - for api in self.apis: - try: - return api.tableExists(name) - except NotImplementedError: - pass - raise Exception('tableExists: no implementation') # should never happen - - def initialFileName(self, uri): - """The first local backing file name created for this URI.""" - for api in self.apis: - try: - return api.initialFileName(uri) - except NotImplementedError: - pass - raise Exception('initialFileName: no implementation') # should never happen - - def getTimestamp(self): - """The timestamp generator for this test case.""" - for api in self.apis: - try: - return api.getTimestamp() - except NotImplementedError: - pass - raise Exception('getTimestamp: no implementation') # should never happen diff --git a/src/third_party/wiredtiger/test/suite/wttimestamp.py b/src/third_party/wiredtiger/test/suite/wttimestamp.py index 03edd2e7c7c..26fff886d65 100755 --- a/src/third_party/wiredtiger/test/suite/wttimestamp.py +++ b/src/third_party/wiredtiger/test/suite/wttimestamp.py @@ -49,8 +49,7 @@ class WiredTigerTimeStamp(object): @contextmanager def 
session_timestamped_transaction(session, timestamper): need_commit = False - if timestamper != None and \ - not (hasattr(session, "_has_transaction") and session._has_transaction): + if timestamper != None and not getattr(session, "_has_transaction", False): session.begin_transaction() need_commit = True yield @@ -67,13 +66,10 @@ def session_timestamped_transaction(session, timestamper): # are passed to the implementation object (via __getattr__), # except for the ones that we explicitly override here. class TimestampedCursor(wiredtiger.Cursor): - def __init__(self, session, cursor, timeStamper, testcase): + def __init__(self, cursor, timeStamper, testcase): self._cursor = cursor self._timeStamper = timeStamper self._testcase = testcase - self._session = session - if not hasattr(session, "_has_transaction"): - session._has_transaction = False def __getattr__(self, name): return getattr(self._cursor, name) @@ -81,7 +77,11 @@ class TimestampedCursor(wiredtiger.Cursor): # A more convenient way to "wrap" an operation in a transaction @contextmanager def timestamped_transaction(self): - with session_timestamped_transaction(self._session, self._timeStamper): + # Prefer the _session object if available, it returns a Python + # Session object that is 1-1 mapped to the WT_SESSION in the C API. + session = getattr(self._cursor, "_session", self._cursor.session) + timestamper = self._timeStamper + with session_timestamped_transaction(session, timestamper): yield # Overrides Cursor.insert