From fb04e108b33ea0f5a24d47f32ff15808e69add09 Mon Sep 17 00:00:00 2001
From: Dmitry Rybakov
Date: Mon, 13 May 2024 16:10:27 +0200
Subject: [PATCH 1/7] RUBY-3379 CSOT for GridFS

---
 lib/mongo.rb                                  |   1 +
 lib/mongo/collection.rb                       |  10 +-
 lib/mongo/csot_timeout_holder.rb              | 115 ++++++++
 lib/mongo/grid/fs_bucket.rb                   |  55 +++-
 lib/mongo/grid/stream/read.rb                 |  16 +-
 lib/mongo/grid/stream/write.rb                |  25 +-
 lib/mongo/operation/context.rb                |  85 +-----
 spec/runners/unified/ambiguous_operations.rb  |  14 +
 spec/runners/unified/crud_operations.rb       |   2 +-
 spec/runners/unified/grid_fs_operations.rb    |  39 ++-
 spec/runners/unified/test.rb                  |   3 +
 .../gridfs-advanced.yml                       | 207 +++++++++++++++
 .../gridfs-delete.yml                         | 152 +++++++++++
 .../gridfs-download.yml                       | 182 +++++++++++++
 .../gridfs-find.yml                           | 100 +++++++
 .../gridfs-upload.yml                         | 249 ++++++++++++++++++
 16 files changed, 1151 insertions(+), 104 deletions(-)
 create mode 100644 lib/mongo/csot_timeout_holder.rb
 create mode 100644 spec/runners/unified/ambiguous_operations.rb
 create mode 100644 spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml
 create mode 100644 spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml
 create mode 100644 spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml
 create mode 100644 spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml
 create mode 100644 spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml

diff --git a/lib/mongo.rb b/lib/mongo.rb
index b90cd4a011..c866ad1a9e 100644
--- a/lib/mongo.rb
+++ b/lib/mongo.rb
@@ -39,6 +39,7 @@
 require 'mongo/semaphore'
 require 'mongo/distinguishing_semaphore'
 require 'mongo/condition_variable'
+require 'mongo/csot_timeout_holder'
 require 'mongo/options'
 require 'mongo/loggable'
 require 'mongo/cluster_time'
diff --git a/lib/mongo/collection.rb b/lib/mongo/collection.rb
index e14b21c044..5cd379b982 100644
--- a/lib/mongo/collection.rb
+++ b/lib/mongo/collection.rb
@@ -441,12 +441,14 @@ def create(opts = {})
     # @option opts [ Hash ] :write_concern The write concern options.
     # @option opts [ Hash | nil ] :encrypted_fields Encrypted fields hash that
     #   was provided to `create` collection helper.
+    # @option opts [ Integer ] :timeout_ms The per-operation timeout in milliseconds.
+    #   Must be a positive integer. The default value is unset, which means no timeout.
     #
     # @return [ Result ] The result of the command.
     #
     # @since 2.0.0
     def drop(opts = {})
-      client.send(:with_session, opts) do |session|
+      client.with_session(opts) do |session|
         maybe_drop_emm_collections(opts[:encrypted_fields], client, session) do
           temp_write_concern = write_concern
           write_concern = if opts[:write_concern]
@@ -454,7 +456,11 @@ def drop(opts = {})
             write_concern
           else
             temp_write_concern
           end
-          context = Operation::Context.new(client: client, session: session)
+          context = Operation::Context.new(
+            client: client,
+            session: session,
+            operation_timeouts: operation_timeouts(opts)
+          )
           operation = Operation::Drop.new({
             selector: { :drop => name },
             db_name: database.name,
diff --git a/lib/mongo/csot_timeout_holder.rb b/lib/mongo/csot_timeout_holder.rb
new file mode 100644
index 0000000000..4d09a485af
--- /dev/null
+++ b/lib/mongo/csot_timeout_holder.rb
@@ -0,0 +1,115 @@
+# frozen_string_literal: true
+
+# Copyright (C) 2024 MongoDB Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module Mongo + class CsotTimeoutHolder + def initialize(session: nil, operation_timeouts: {}) + @deadline = calculate_deadline(operation_timeouts, session) + @operation_timeouts = operation_timeouts + @timeout_sec = if @deadline then @deadline - Utils.monotonic_time end + end + + attr_reader :deadline + attr_reader :timeout_sec + attr_reader :operation_timeouts + + # @return [ true | false ] Whether CSOT is enabled for the operation + def csot? + !deadline.nil? + end + + # @return [ true | false ] Returns false if CSOT is not enabled, or if + # CSOT is set to 0 (means unlimited), otherwise true. + def has_timeout? + ![nil, 0].include?(@deadline) + end + + # @return [ Float | nil ] Returns the remaining seconds of the timeout + # set for the operation; if no timeout is set, or the timeout is 0 + # (means unlimited) returns nil. + def remaining_timeout_sec + return nil unless has_timeout? + + deadline - Utils.monotonic_time + end + + def remaining_timeout_sec! + check_timeout! + remaining_timeout_sec + end + + # @return [ Integer | nil ] Returns the remaining milliseconds of the timeout + # set for the operation; if no timeout is set, or the timeout is 0 + # (means unlimited) returns nil. + def remaining_timeout_ms + seconds = remaining_timeout_sec + return nil if seconds.nil? + + (seconds * 1_000).to_i + end + + def remaining_timeout_ms! + check_timeout! + remaining_timeout_ms + end + + # @return [ true | false ] Whether the timeout for the operation expired. + # If no timeout set, this method returns false. + def timeout_expired? + if has_timeout? + Utils.monotonic_time >= deadline + else + false + end + end + + # Check whether the operation timeout expired, and raises an appropriate + # error if yes. + # + # @raise [ Error::TimeoutError ] + def check_timeout! + if timeout_expired? + raise Error::TimeoutError, "Operation took more than #{timeout_sec} seconds" + end + end + + private + + def calculate_deadline(opts = {}, session = nil) + if opts[:operation_timeout_ms] && session&.with_transaction_deadline + raise ArgumentError, 'Cannot override timeout_ms inside with_transaction block' + end + + if session&.with_transaction_deadline + session&.with_transaction_deadline + elsif operation_timeout_ms = opts[:operation_timeout_ms] + if operation_timeout_ms > 0 + Utils.monotonic_time + (operation_timeout_ms / 1_000.0) + elsif operation_timeout_ms == 0 + 0 + elsif operation_timeout_ms < 0 + raise ArgumentError, "timeout_ms must be a non-negative integer but #{operation_timeout_ms} given" + end + elsif inherited_timeout_ms = opts[:inherited_timeout_ms] + if inherited_timeout_ms > 0 + Utils.monotonic_time + (inherited_timeout_ms / 1_000.0) + elsif inherited_timeout_ms == 0 + 0 + end + end + end + end +end diff --git a/lib/mongo/grid/fs_bucket.rb b/lib/mongo/grid/fs_bucket.rb index 8651e0545d..4fbc5218c1 100644 --- a/lib/mongo/grid/fs_bucket.rb +++ b/lib/mongo/grid/fs_bucket.rb @@ -201,8 +201,8 @@ def prefix # @return [ Result ] The result of the remove. 
# # @since 2.0.0 - def delete_one(file) - delete(file.id) + def delete_one(file, opts = {}) + delete(file.id, opts) end # Remove a single file, identified by its id from the GridFS. @@ -217,9 +217,14 @@ def delete_one(file) # @raise [ Error::FileNotFound ] If the file is not found. # # @since 2.1.0 - def delete(id) - result = files_collection.find({ :_id => id }, @options).delete_one - chunks_collection.find({ :files_id => id }, @options).delete_many + def delete(id, opts = {}) + timeout_holder = CsotTimeoutHolder.new(operation_timeouts: operation_timeouts(opts)) + result = files_collection + .find({ :_id => id }, @options.merge(timeout_ms: timeout_holder.remaining_timeout_ms)) + .delete_one(timeout_ms: timeout_holder.remaining_timeout_ms) + chunks_collection + .find({ :files_id => id }, @options.merge(timeout_ms: timeout_holder.remaining_timeout_ms)) + .delete_many(timeout_ms: timeout_holder.remaining_timeout_ms) raise Error::FileNotFound.new(id, :id) if result.n == 0 result end @@ -485,9 +490,10 @@ def write_concern end # Drop the collections that implement this bucket. - def drop - files_collection.drop - chunks_collection.drop + def drop(opts = {}) + context = Operation::Context.new(operation_timeouts: operation_timeouts(opts)) + files_collection.drop(timeout_ms: context.remaining_timeout_ms) + chunks_collection.drop(timeout_ms: context.remaining_timeout_ms) end private @@ -512,12 +518,24 @@ def files_name "#{prefix}.#{Grid::File::Info::COLLECTION}" end - def ensure_indexes! - if files_collection.find({}, limit: 1, projection: { _id: 1 }).first.nil? + def ensure_indexes!(timeout_holder = nil) + fc_idx = files_collection.find( + {}, + limit: 1, + projection: { _id: 1 }, + timeout_ms: timeout_holder&.remaining_timeout_ms + ).first + if fc_idx.nil? create_index_if_missing!(files_collection, FSBucket::FILES_INDEX) end - if chunks_collection.find({}, limit: 1, projection: { _id: 1 }).first.nil? + cc_idx = chunks_collection.find( + {}, + limit: 1, + projection: { _id: 1 }, + timeout_ms: timeout_holder&.remaining_timeout_ms + ).first + if cc_idx.nil? create_index_if_missing!(chunks_collection, FSBucket::CHUNKS_INDEX, :unique => true) end end @@ -537,6 +555,21 @@ def create_index_if_missing!(collection, index_spec, options = {}) end end end + + # @return [ Hash ] timeout_ms value set on the operation level (if any), + # and/or timeout_ms that is set on collection/database/client level (if any). + # + # @api private + def operation_timeouts(opts = {}) + # TODO: We should re-evaluate if we need two timeouts separately. + {}.tap do |result| + if opts[:timeout_ms].nil? + result[:inherited_timeout_ms] = database.timeout_ms + else + result[:operation_timeout_ms] = opts[:timeout_ms] + end + end + end end end end diff --git a/lib/mongo/grid/stream/read.rb b/lib/mongo/grid/stream/read.rb index f33b4a5126..796eaa129b 100644 --- a/lib/mongo/grid/stream/read.rb +++ b/lib/mongo/grid/stream/read.rb @@ -59,6 +59,12 @@ def initialize(fs, options) @file_id = @options.delete(:file_id) @options.freeze @open = true + @timeout_holder = CsotTimeoutHolder.new( + operation_timeouts: { + operation_timeout_ms: options[:timeout_ms], + inherited_timeout_ms: fs.database.timeout_ms + } + ) end # Iterate through chunk data streamed from the FSBucket. 
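
The FSBucket changes above and the stream changes that follow all share one pattern: construct a single CsotTimeoutHolder up front, then hand its shrinking budget to every sub-command. A minimal standalone sketch of that behavior, assuming this patch is applied (the sleep stands in for a real command):

    require 'mongo'

    # One holder governs the whole multi-command operation: the deadline is
    # fixed at construction, so each later command sees a smaller budget.
    holder = Mongo::CsotTimeoutHolder.new(
      operation_timeouts: { operation_timeout_ms: 200 }
    )
    holder.csot?                # => true (a deadline is set)
    holder.remaining_timeout_ms # => ~200
    sleep 0.05                  # pretend the first command took 50ms
    holder.remaining_timeout_ms # => ~150 left for the next command
    holder.check_timeout!       # raises Mongo::Error::TimeoutError once expired
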
@@ -178,7 +184,11 @@ def read_preference # @since 2.1.0 def file_info @file_info ||= begin - doc = options[:file_info_doc] || fs.files_collection.find(_id: file_id).first + doc = options[:file_info_doc] || + fs.files_collection.find( + { _id: file_id }, + { timeout_ms: @timeout_holder.remaining_timeout_ms! } + ).first if doc File::Info.new(Options::Mapper.transform(doc, File::Info::MAPPINGS.invert)) else @@ -209,6 +219,10 @@ def view else options end + if @timeout_holder.csot? + opts[:timeout_ms] = @timeout_holder.remaining_timeout_ms! + opts[:timeout_mode] = :cursor_lifetime + end fs.chunks_collection.find({ :files_id => file_id }, opts).sort(:n => 1) end diff --git a/lib/mongo/grid/stream/write.rb b/lib/mongo/grid/stream/write.rb index 75ef68c56b..4ff2dc0a34 100644 --- a/lib/mongo/grid/stream/write.rb +++ b/lib/mongo/grid/stream/write.rb @@ -83,6 +83,12 @@ def initialize(fs, options) @options.freeze @filename = @options[:filename] @open = true + @timeout_holder = CsotTimeoutHolder.new( + operation_timeouts: { + operation_timeout_ms: options[:timeout_ms], + inherited_timeout_ms: fs.database.timeout_ms + } + ) end # Write to the GridFS bucket from the source stream or a string. @@ -107,7 +113,12 @@ def write(io) end chunks = File::Chunk.split(io, file_info, @n) @n += chunks.size - chunks_collection.insert_many(chunks) unless chunks.empty? + unless chunks.empty? + chunks_collection.insert_many( + chunks, + timeout_ms: @timeout_holder.remaining_timeout_ms! + ) + end self end @@ -124,7 +135,10 @@ def write(io) def close ensure_open! update_length - files_collection.insert_one(file_info, @options) + files_collection.insert_one( + file_info, + @options.merge(timeout_ms: @timeout_holder.remaining_timeout_ms!) + ) @open = false file_id end @@ -166,7 +180,10 @@ def closed? # # @since 2.1.0 def abort - fs.chunks_collection.find({ :files_id => file_id }, @options).delete_many + fs.chunks_collection.find( + { :files_id => file_id }, + @options.merge(timeout_ms: @timeout_holder.remaining_timeout_ms!) + ).delete_many (@open = false) || true end @@ -200,7 +217,7 @@ def file_info end def ensure_indexes! - fs.send(:ensure_indexes!) + fs.send(:ensure_indexes!, @timeout_holder) end def ensure_open! diff --git a/lib/mongo/operation/context.rb b/lib/mongo/operation/context.rb index d023bc313b..8f736a291d 100644 --- a/lib/mongo/operation/context.rb +++ b/lib/mongo/operation/context.rb @@ -34,7 +34,7 @@ module Operation # operations. # # @api private - class Context + class Context < CsotTimeoutHolder def initialize( client: nil, session: nil, @@ -61,19 +61,14 @@ def initialize( @session = session @view = view @connection_global_id = connection_global_id - @deadline = calculate_deadline(operation_timeouts, session) - @operation_timeouts = operation_timeouts - @timeout_sec = if @deadline then @deadline - Utils.monotonic_time end @options = options + super(session: session, operation_timeouts: operation_timeouts) end attr_reader :client attr_reader :session attr_reader :view - attr_reader :deadline - attr_reader :timeout_sec attr_reader :options - attr_reader :operation_timeouts # Returns a new Operation::Context with the deadline refreshed # and relative to the current moment. @@ -165,85 +160,9 @@ def encrypter end end - # @return [ true | false ] Whether CSOT is enabled for the operation - def csot? - !deadline.nil? - end - - # @return [ true | false ] Returns false if CSOT is not enabled, or if - # CSOT is set to 0 (means unlimited), otherwise true. - def has_timeout? 
- ![nil, 0].include?(@deadline) - end - - # @return [ Float | nil ] Returns the remaining seconds of the timeout - # set for the operation; if no timeout is set, or the timeout is 0 - # (means unlimited) returns nil. - def remaining_timeout_sec - return nil unless has_timeout? - - deadline - Utils.monotonic_time - end - - # @return [ Integer | nil ] Returns the remaining milliseconds of the timeout - # set for the operation; if no timeout is set, or the timeout is 0 - # (means unlimited) returns nil. - def remaining_timeout_ms - seconds = remaining_timeout_sec - return nil if seconds.nil? - - (seconds * 1_000).to_i - end - def inspect "#<#{self.class} connection_global_id=#{connection_global_id.inspect} deadline=#{deadline.inspect} options=#{options.inspect} operation_timeouts=#{operation_timeouts.inspect}>" end - - # @return [ true | false ] Whether the timeout for the operation expired. - # If no timeout set, this method returns false. - def timeout_expired? - if has_timeout? - Utils.monotonic_time >= deadline - else - false - end - end - - # Check whether the operation timeout expired, and raises an appropriate - # error if yes. - # - # @raise [ Error::TimeoutError ] - def check_timeout! - if timeout_expired? - raise Error::TimeoutError, "Operation took more than #{timeout_sec} seconds" - end - end - - private - - def calculate_deadline(opts = {}, session = nil) - if opts[:operation_timeout_ms] && session&.with_transaction_deadline - raise ArgumentError, 'Cannot override timeout_ms inside with_transaction block' - end - - if session&.with_transaction_deadline - session&.with_transaction_deadline - elsif operation_timeout_ms = opts[:operation_timeout_ms] - if operation_timeout_ms > 0 - Utils.monotonic_time + (operation_timeout_ms / 1_000.0) - elsif operation_timeout_ms == 0 - 0 - elsif operation_timeout_ms < 0 - raise ArgumentError, /must be a non-negative integer/ - end - elsif inherited_timeout_ms = opts[:inherited_timeout_ms] - if inherited_timeout_ms > 0 - Utils.monotonic_time + (inherited_timeout_ms / 1_000.0) - elsif inherited_timeout_ms == 0 - 0 - end - end - end end end end diff --git a/spec/runners/unified/ambiguous_operations.rb b/spec/runners/unified/ambiguous_operations.rb new file mode 100644 index 0000000000..80356e2584 --- /dev/null +++ b/spec/runners/unified/ambiguous_operations.rb @@ -0,0 +1,14 @@ +# frozen_string_literal: true + +module Unified + module AmbiguousOperations + + def find(op) + entities.get(:collection, op['object']) + crud_find(op) + rescue Unified::Error::EntityMissing => e + entities.get(:bucket, op['object']) + gridfs_find(op) + end + end +end diff --git a/spec/runners/unified/crud_operations.rb b/spec/runners/unified/crud_operations.rb index 50a97f2cdc..4a76a06b67 100644 --- a/spec/runners/unified/crud_operations.rb +++ b/spec/runners/unified/crud_operations.rb @@ -5,7 +5,7 @@ module Unified module CrudOperations - def find(op) + def crud_find(op) get_find_view(op).to_a end diff --git a/spec/runners/unified/grid_fs_operations.rb b/spec/runners/unified/grid_fs_operations.rb index c5cad546bf..2121eb0fa4 100644 --- a/spec/runners/unified/grid_fs_operations.rb +++ b/spec/runners/unified/grid_fs_operations.rb @@ -5,17 +5,38 @@ module Unified module GridFsOperations + def gridfs_find(op) + bucket = entities.get(:bucket, op.use!('object')) + use_arguments(op) do |args| + filter = args.use!('filter') + + opts = extract_options(args, 'allowDiskUse', + 'skip', 'hint','timeoutMS', + 'noCursorTimeout', 'sort', 'limit') + + bucket.find(filter,opts).to_a + end + 
end + def delete(op) bucket = entities.get(:bucket, op.use!('object')) use_arguments(op) do |args| - bucket.delete(args.use!('id')) + opts = {} + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + bucket.delete(args.use!('id'), opts) end end def download(op) bucket = entities.get(:bucket, op.use!('object')) use_arguments(op) do |args| - stream = bucket.open_download_stream(args.use!('id')) + opts = {} + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + stream = bucket.open_download_stream(args.use!('id'), opts) stream.read end end @@ -48,6 +69,9 @@ def upload(op) if disable_md5 = args.use('disableMD5') opts[:disable_md5] = disable_md5 end + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end contents = transform_contents(args.use!('source')) file_id = nil bucket.open_upload_stream(args.use!('filename'), **opts) do |stream| @@ -58,6 +82,17 @@ def upload(op) end end + def drop(op) + bucket = entities.get(:bucket, op.use!('object')) + use_arguments(op) do |args| + opts = {} + if timeout_ms = args.use('timeoutMS') + opts[:timeout_ms] = timeout_ms + end + bucket.drop(opts) + end + end + private def transform_contents(contents) diff --git a/spec/runners/unified/test.rb b/spec/runners/unified/test.rb index a01323761c..000c8383fc 100644 --- a/spec/runners/unified/test.rb +++ b/spec/runners/unified/test.rb @@ -2,6 +2,7 @@ # rubocop:todo all require 'runners/crud/requirement' +require 'runners/unified/ambiguous_operations' require 'runners/unified/client_side_encryption_operations' require 'runners/unified/crud_operations' require 'runners/unified/grid_fs_operations' @@ -17,6 +18,7 @@ module Unified class Test + include AmbiguousOperations include ClientSideEncryptionOperations include CrudOperations include GridFsOperations @@ -413,6 +415,7 @@ def execute_operation(op) rescue Mongo::Error, bson_error, Mongo::Auth::Unauthorized, ArgumentError => e if expected_error.use('isTimeoutError') unless Mongo::Error::TimeoutError === e + raise e raise Error::ErrorMismatch, %Q,Expected TimeoutError ("isTimeoutError") but got #{e}, end end diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml new file mode 100644 index 0000000000..b03812b719 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-advanced.yml @@ -0,0 +1,207 @@ +description: "timeoutMS behaves correctly for advanced GridFS API operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. 
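+
+# The unified-runner operations above map this file's "timeoutMS" argument to
+# the driver's timeout_ms option. For orientation, a minimal Ruby sketch of
+# the equivalent driver calls these tests exercise (host and ids assumed):
+#
+#   client = Mongo::Client.new(['localhost:27017'], database: 'test', timeout_ms: 75)
+#   bucket = client.database.fs
+#   bucket.drop(timeout_ms: 2000)            # per-operation override
+#   bucket.delete(file_id, timeout_ms: 1000) # override for a single delete
+#   bucket.delete(file_id)                   # inherits the 75ms client budget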
+ +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: + - _id: &fileDocumentId { $oid: "000000000000000000000005" } + length: 8 + chunkSize: 4 + uploadDate: { $date: "1970-01-01T00:00:00.000Z" } + filename: "length-8" + contentType: "application/octet-stream" + aliases: [] + metadata: {} + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: + - _id: { $oid: "000000000000000000000005" } + files_id: *fileDocumentId + n: 0 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + - _id: { $oid: "000000000000000000000006" } + files_id: *fileDocumentId + n: 1 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + +tests: + # Tests for the "rename" operation. + # Ruby driver does not support rename for GridFS bucket + + # - description: "timeoutMS can be overridden for a rename" + # operations: + # - name: failPoint + # object: testRunner + # arguments: + # client: *failPointClient + # failPoint: + # configureFailPoint: failCommand + # mode: { times: 1 } + # data: + # failCommands: ["update"] + # blockConnection: true + # blockTimeMS: 100 + # - name: rename + # object: *bucket + # arguments: + # id: *fileDocumentId + # newFilename: "foo" + # timeoutMS: 2000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 2000ms should let it succeed. + # expectEvents: + # - client: *client + # events: + # - commandStartedEvent: + # commandName: update + # databaseName: *databaseName + # command: + # update: *filesCollectionName + # maxTimeMS: { $$type: ["int", "long"] } + + # - description: "timeoutMS applied to update during a rename" + # operations: + # - name: failPoint + # object: testRunner + # arguments: + # client: *failPointClient + # failPoint: + # configureFailPoint: failCommand + # mode: { times: 1 } + # data: + # failCommands: ["update"] + # blockConnection: true + # blockTimeMS: 100 + # - name: rename + # object: *bucket + # arguments: + # id: *fileDocumentId + # newFilename: "foo" + # expectError: + # isTimeoutError: true + # expectEvents: + # - client: *client + # events: + # - commandStartedEvent: + # commandName: update + # databaseName: *databaseName + # command: + # update: *filesCollectionName + # maxTimeMS: { $$type: ["int", "long"] } + + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents + # assertions as these assertions reduce test robustness and can cause flaky failures. 
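+
+# Each test below arms a failCommand fail point before invoking the bucket
+# operation. For reference, a minimal sketch of the raw command the failPoint
+# entity issues (host assumed):
+#
+#   admin = Mongo::Client.new(['localhost:27017'], database: 'admin')
+#   admin.database.command(
+#     configureFailPoint: 'failCommand',
+#     mode: { times: 1 },
+#     data: { failCommands: ['drop'], blockConnection: true, blockTimeMS: 100 }
+#   )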
+ + - description: "timeoutMS can be overridden for drop" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 100 + - name: drop + object: *bucket + arguments: + timeoutMS: 2000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 2000ms should let it succeed. + + - description: "timeoutMS applied to files collection drop" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 100 + - name: drop + object: *bucket + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: drop + databaseName: *databaseName + command: + drop: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to chunks collection drop" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: + # Skip the drop for the files collection. + skip: 1 + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 100 + - name: drop + object: *bucket + expectError: + isTimeoutError: true + + - description: "timeoutMS applied to drop as a whole, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["drop"] + blockConnection: true + blockTimeMS: 50 + - name: drop + object: *bucket + expectError: + isTimeoutError: true diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml new file mode 100644 index 0000000000..9c72537c38 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-delete.yml @@ -0,0 +1,152 @@ +description: "timeoutMS behaves correctly for GridFS delete operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. 
+ +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: + - _id: &fileDocumentId { $oid: "000000000000000000000005" } + length: 8 + chunkSize: 4 + uploadDate: { $date: "1970-01-01T00:00:00.000Z" } + filename: "length-8" + contentType: "application/octet-stream" + aliases: [] + metadata: {} + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: + - _id: { $oid: "000000000000000000000005" } + files_id: *fileDocumentId + n: 0 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + - _id: { $oid: "000000000000000000000006" } + files_id: *fileDocumentId + n: 1 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + +tests: + - description: "timeoutMS can be overridden for delete" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 100 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed. + + - description: "timeoutMS applied to delete against the files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 100 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to delete against the chunks collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: + # The first "delete" will be against the files collection, so we skip it. + skip: 1 + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 100 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + + # Test that drivers are not refreshing the timeout between commands. We test this by blocking both "delete" commands + # for 50ms each. The delete should inherit timeoutMS=75 from the client/database and the server takes over 75ms + # total, so the operation should fail. 
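+  # In other words: 50ms (files-collection delete) + 50ms (chunks-collection
+  # delete) = 100ms of server-side blocking against a single 75ms budget, so
+  # the second delete must cross the shared deadline even though neither
+  # command alone is blocked past 75ms.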
+ - description: "timeoutMS applied to entire delete, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 50 + - name: delete + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml new file mode 100644 index 0000000000..772ffd6e08 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-download.yml @@ -0,0 +1,182 @@ +description: "timeoutMS behaves correctly for GridFS download operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: + - _id: &fileDocumentId { $oid: "000000000000000000000005" } + length: 8 + chunkSize: 4 + uploadDate: { $date: "1970-01-01T00:00:00.000Z" } + filename: "length-8" + contentType: "application/octet-stream" + aliases: [] + metadata: {} + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: + - _id: { $oid: "000000000000000000000005" } + files_id: *fileDocumentId + n: 0 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + - _id: { $oid: "000000000000000000000006" } + files_id: *fileDocumentId + n: 1 + data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344 + +tests: + - description: "timeoutMS can be overridden for download" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed. 
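+
+  # The download flow issues two finds (files document, then chunks) against
+  # one shared budget. A minimal sketch of the driver call under test here,
+  # with the file id assumed (per the read stream changes, the chunks query
+  # also runs with timeout_mode: :cursor_lifetime):
+  #
+  #   bucket.open_download_stream(file_id, timeout_ms: 1000) do |stream|
+  #     data = stream.read
+  #   end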
+ + - description: "timeoutMS applied to find to get files document" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to find to get chunks" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: + # The first "find" will be against the files collection, so we skip it. + skip: 1 + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *chunksCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + # Test that drivers are not refreshing the timeout between commands. We test this by blocking both "find" commands + # for 50ms each. The download should inherit timeoutMS=75 from the client/database and the server takes over 75ms + # total, so the operation should fail. + - description: "timeoutMS applied to entire download, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 50 + - name: download + object: *bucket + arguments: + id: *fileDocumentId + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *chunksCollectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml new file mode 100644 index 0000000000..000150ae67 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-find.yml @@ -0,0 +1,100 @@ +description: "timeoutMS behaves correctly for GridFS find operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. 
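+
+# A minimal sketch of the call under test, assuming the bucket entity defined
+# below (FSBucket#find forwards options to the underlying files collection):
+#
+#   bucket.find({}, timeout_ms: 1000).to_a # per-operation override
+#   bucket.find({}).to_a                   # inherits the 75ms client budget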
+ +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: [] + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: [] + +tests: + - description: "timeoutMS can be overridden for a find" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: find + object: *bucket + arguments: + filter: {} + timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed. + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } + + - description: "timeoutMS applied to find command" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: find + object: *bucket + arguments: + filter: {} + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *filesCollectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml b/spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml new file mode 100644 index 0000000000..51e1366878 --- /dev/null +++ b/spec/spec_tests/data/client_side_operations_timeout/gridfs-upload.yml @@ -0,0 +1,249 @@ +description: "timeoutMS behaves correctly for GridFS upload operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid # GridFS ops can be slow on serverless. 
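+
+# Upload is the chattiest GridFS operation: the index checks, chunk inserts
+# and files-document insert below all draw on one budget. A minimal sketch of
+# the call under test (source contents assumed):
+#
+#   bucket.open_upload_stream('filename', timeout_ms: 1000) do |stream|
+#     stream.write(StringIO.new("\x11\x22\x33\x44\x55"))
+#   end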
+ +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 75 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: &databaseName test + - bucket: + id: &bucket bucket + database: *database + - collection: + id: &filesCollection filesCollection + database: *database + collectionName: &filesCollectionName fs.files + - collection: + id: &chunksCollection chunksCollection + database: *database + collectionName: &chunksCollectionName fs.chunks + +initialData: + - collectionName: *filesCollectionName + databaseName: *databaseName + documents: [] + - collectionName: *chunksCollectionName + databaseName: *databaseName + documents: [] + +tests: + # Many tests in this file do not specify command monitoring expectations because GridFS uploads internally do a + # number of operations, so expecting an exact set of commands can cause flaky failures. + + - description: "timeoutMS can be overridden for upload" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + timeoutMS: 1000 + + # On the first write to the bucket, drivers check if the files collection is empty to see if indexes need to be + # created. + - description: "timeoutMS applied to initial find on files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # On the first write to the bucket, drivers check if the files collection has the correct indexes. + - description: "timeoutMS applied to listIndexes on files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # If the files collection is empty when the first write to the bucket occurs, drivers attempt to create an index + # on the bucket's files collection. + - description: "timeoutMS applied to index creation for files collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # On the first write to the bucket, drivers check if the chunks collection has the correct indexes. 
+ - description: "timeoutMS applied to listIndexes on chunks collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # The first listIndexes will be on the files collection, so we skip it. + mode: { skip: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # If the files collection is empty when the first write to the bucket occurs, drivers attempt to create an index + # on the bucket's chunks collection. + - description: "timeoutMS applied to index creation for chunks collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # This index is created after the one on the files collection, so we skip the first createIndexes command + # and target the second. + mode: { skip: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + - description: "timeoutMS applied to chunk insertion" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + - description: "timeoutMS applied to creation of files document" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Skip the insert to upload the chunk. Because the whole file fits into one chunk, the second insert will + # be the files document upload. + mode: { skip: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 100 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true + + # Test that drivers apply timeoutMS to the entire upload rather than refreshing it between individual commands. We + # test this by blocking the "find" and "listIndexes" commands for 50ms each and performing an upload. The upload + # should inherit timeoutMS=75 from the client/database and the server takes over 75ms total, so the operation should + # fail. 
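+  # As with the delete and download variants: 50ms (find) + 50ms (listIndexes)
+  # = 100ms of blocking against one 75ms budget, so the deadline is crossed
+  # partway through the second command.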
+ - description: "timeoutMS applied to upload as a whole, not individual parts" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find", "listIndexes"] + blockConnection: true + blockTimeMS: 50 + - name: upload + object: *bucket + arguments: + filename: filename + source: { $$hexBytes: "1122334455" } + expectError: + isTimeoutError: true From a3011e9989a956995346d0e2f4f742268e1fe79a Mon Sep 17 00:00:00 2001 From: Dmitry Rybakov Date: Mon, 13 May 2024 16:30:44 +0200 Subject: [PATCH 2/7] Refactor --- lib/mongo/auth/aws/credentials_retriever.rb | 63 +++++++++---------- lib/mongo/crypt/auto_encrypter.rb | 8 +-- lib/mongo/crypt/context.rb | 30 +++++---- lib/mongo/crypt/explicit_encrypter.rb | 4 +- .../crypt/kms/azure/credentials_retriever.rb | 31 +++++---- .../crypt/kms/gcp/credentials_retriever.rb | 15 +++-- 6 files changed, 69 insertions(+), 82 deletions(-) diff --git a/lib/mongo/auth/aws/credentials_retriever.rb b/lib/mongo/auth/aws/credentials_retriever.rb index a8feac1dce..e8250fe9de 100644 --- a/lib/mongo/auth/aws/credentials_retriever.rb +++ b/lib/mongo/auth/aws/credentials_retriever.rb @@ -69,8 +69,7 @@ def initialize(user = nil, credentials_cache: CredentialsCache.instance) # Retrieves a valid set of credentials, if possible, or raises # Auth::InvalidConfiguration. # - # @param [ Operation::Context | nil ] context Context of the operation - # credentials are retrieved for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout, if any. # # @return [ Auth::Aws::Credentials ] A valid set of credentials. # @@ -80,14 +79,14 @@ def initialize(user = nil, credentials_cache: CredentialsCache.instance) # retrieved from any source. # @raise Error::TimeoutError if credentials cannot be retrieved within # the timeout defined on the operation context. - def credentials(context = nil) + def credentials(timeout_holder = nil) credentials = credentials_from_user(user) return credentials unless credentials.nil? credentials = credentials_from_environment return credentials unless credentials.nil? - credentials = @credentials_cache.fetch { obtain_credentials_from_endpoints(context) } + credentials = @credentials_cache.fetch { obtain_credentials_from_endpoints(timeout_holder) } return credentials unless credentials.nil? raise Auth::Aws::CredentialsNotFound @@ -132,8 +131,7 @@ def credentials_from_environment # Returns credentials from the AWS metadata endpoints. # - # @param [ Operation::Context | nil ] context Context of the operation - # credentials are retrieved for. + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeout. # # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil # if retrieval failed or the obtained credentials are invalid. @@ -142,12 +140,12 @@ def credentials_from_environment # of credentials. # @ raise Error::TimeoutError if credentials cannot be retrieved within # the timeout defined on the operation context. 
- def obtain_credentials_from_endpoints(context = nil) - if (credentials = web_identity_credentials(context)) && credentials_valid?(credentials, 'Web identity token') + def obtain_credentials_from_endpoints(timeout_holder = nil) + if (credentials = web_identity_credentials(timeout_holder)) && credentials_valid?(credentials, 'Web identity token') credentials - elsif (credentials = ecs_metadata_credentials(context)) && credentials_valid?(credentials, 'ECS task metadata') + elsif (credentials = ecs_metadata_credentials(timeout_holder)) && credentials_valid?(credentials, 'ECS task metadata') credentials - elsif (credentials = ec2_metadata_credentials(context)) && credentials_valid?(credentials, 'EC2 instance metadata') + elsif (credentials = ec2_metadata_credentials(timeout_holder)) && credentials_valid?(credentials, 'EC2 instance metadata') credentials end end @@ -155,27 +153,26 @@ def obtain_credentials_from_endpoints(context = nil) # Returns credentials from the EC2 metadata endpoint. The credentials # could be empty, partial or invalid. # - # @param [ Operation::Context | nil ] context Context of the operation - # credentials are retrieved for. + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeout. # # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil # if retrieval failed. # @ raise Error::TimeoutError if credentials cannot be retrieved within # the timeout defined on the operation context. - def ec2_metadata_credentials(context = nil) - context&.check_timeout! + def ec2_metadata_credentials(timeout_holder = nil) + timeout_holder&.check_timeout! http = Net::HTTP.new('169.254.169.254') req = Net::HTTP::Put.new('/latest/api/token', # The TTL is required in order to obtain the metadata token. {'x-aws-ec2-metadata-token-ttl-seconds' => '30'}) - resp = with_timeout(context) do + resp = with_timeout(timeout_holder) do http.request(req) end if resp.code != '200' return nil end metadata_token = resp.body - resp = with_timeout(context) do + resp = with_timeout(timeout_holder) do http_get(http, '/latest/meta-data/iam/security-credentials', metadata_token) end if resp.code != '200' @@ -208,15 +205,14 @@ def ec2_metadata_credentials(context = nil) # Returns credentials from the ECS metadata endpoint. The credentials # could be empty, partial or invalid. # - # @param [ Operation::Context | nil ] context Context of the operation - # credentials are retrieved for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil # if retrieval failed. # @ raise Error::TimeoutError if credentials cannot be retrieved within # the timeout defined on the operation context. - def ecs_metadata_credentials(context = nil) - context&.check_timeout! + def ecs_metadata_credentials(timeout_holder = nil) + timeout_holder&.check_timeout! relative_uri = ENV['AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'] if relative_uri.nil? || relative_uri.empty? return nil @@ -230,7 +226,7 @@ def ecs_metadata_credentials(context = nil) # a leading slash must be added by the driver, but this is not # in fact needed. req = Net::HTTP::Get.new(relative_uri) - resp = with_timeout(context) do + resp = with_timeout(timeout_holder) do http.request(req) end if resp.code != '200' @@ -252,16 +248,15 @@ def ecs_metadata_credentials(context = nil) # inside EKS. See https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html # for further details. 
# - # @param [ Operation::Context | nil ] context Context of the operation - # credentials are retrieved for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil # if retrieval failed. - def web_identity_credentials(context = nil) + def web_identity_credentials(timeout_holder = nil) web_identity_token, role_arn, role_session_name = prepare_web_identity_inputs return nil if web_identity_token.nil? response = request_web_identity_credentials( - web_identity_token, role_arn, role_session_name, context + web_identity_token, role_arn, role_session_name, timeout_holder ) return if response.nil? credentials_from_web_identity_response(response) @@ -296,16 +291,15 @@ def prepare_web_identity_inputs # that the caller is assuming. # @param [ String ] role_session_name An identifier for the assumed # role session. - # @param [ Operation::Context | nil ] context Context of the operation - # credentials are retrieved for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [ Net::HTTPResponse | nil ] AWS API response if successful, # otherwise nil. # # @ raise Error::TimeoutError if credentials cannot be retrieved within # the timeout defined on the operation context. - def request_web_identity_credentials(token, role_arn, role_session_name, context) - context&.check_timeout! + def request_web_identity_credentials(token, role_arn, role_session_name, timeout_holder) + timeout_holder&.check_timeout! uri = URI('https://sts.amazonaws.com/') params = { 'Action' => 'AssumeRoleWithWebIdentity', @@ -317,7 +311,7 @@ def request_web_identity_credentials(token, role_arn, role_session_name, context uri.query = ::URI.encode_www_form(params) req = Net::HTTP::Post.new(uri) req['Accept'] = 'application/json' - resp = with_timeout(context) do + resp = with_timeout(timeout_holder) do Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) do |https| https.request(req) end @@ -396,13 +390,12 @@ def credentials_valid?(credentials, source) # We use +Timeout.timeout+ here because there is no other acceptable easy # way to time limit http requests. # - # @param [ Operation::Context | nil ] context Context of the operation + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @ raise Error::TimeoutError if deadline exceeded. - def with_timeout(context) - context&.check_timeout! - timeout = context&.remaining_timeout_sec || METADATA_TIMEOUT - exception_class = if context&.csot? + def with_timeout(timeout_holder) + timeout = timeout_holder&.remaining_timeout_sec! || METADATA_TIMEOUT + exception_class = if timeout_holder&.csot? Error::TimeoutError else nil diff --git a/lib/mongo/crypt/auto_encrypter.rb b/lib/mongo/crypt/auto_encrypter.rb index 9d7e51a257..3fa8970783 100644 --- a/lib/mongo/crypt/auto_encrypter.rb +++ b/lib/mongo/crypt/auto_encrypter.rb @@ -187,13 +187,13 @@ def encrypt? # @param [ Hash ] command The command to be encrypted. # # @return [ BSON::Document ] The encrypted command. - def encrypt(database_name, command, context) + def encrypt(database_name, command, timeout_holder) AutoEncryptionContext.new( @crypt_handle, @encryption_io, database_name, command - ).run_state_machine(context) + ).run_state_machine(timeout_holder) end # Decrypt a database command. @@ -201,12 +201,12 @@ def encrypt(database_name, command, context) # @param [ Hash ] command The command with encrypted fields. # # @return [ BSON::Document ] The decrypted command. 
- def decrypt(command, context) + def decrypt(command, timeout_holder) AutoDecryptionContext.new( @crypt_handle, @encryption_io, command - ).run_state_machine(context) + ).run_state_machine(timeout_holder) end # Close the resources created by the AutoEncrypter. diff --git a/lib/mongo/crypt/context.rb b/lib/mongo/crypt/context.rb index ac2d5d9bc5..d8c6772999 100644 --- a/lib/mongo/crypt/context.rb +++ b/lib/mongo/crypt/context.rb @@ -66,8 +66,8 @@ def state # Runs the mongocrypt_ctx_t state machine and handles # all I/O on behalf of # - # @param [ Operation::Context ] context Context of the operation the state - # machine is run for. + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeouts for the + # operation the state. # # @return [ BSON::Document ] A BSON document representing the outcome # of the state machine. Contents can differ depending on how the @@ -78,10 +78,9 @@ def state # # This method is not currently unit tested. It is integration tested # in spec/integration/explicit_encryption_spec.rb - def run_state_machine(context) + def run_state_machine(timeout_holder) while true - context.check_timeout! - timeout_ms = context.remaining_timeout_ms + timeout_ms = timeout_holder.remaining_timeout_ms! case state when :error Binding.check_ctx_status(self) @@ -123,7 +122,7 @@ def run_state_machine(context) when :need_kms_credentials Binding.ctx_provide_kms_providers( self, - retrieve_kms_credentials(context).to_document + retrieve_kms_credentials(timeout_holder).to_document ) else raise Error::CryptError.new( @@ -152,16 +151,15 @@ def mongocrypt_feed(doc) # Retrieves KMS credentials for providers that are configured # for automatic credentials retrieval. # - # @param [ Operation::Context ] context Context of the operation credentials - # are retrieved for. + # @param [ CsotTimeoutHolder ] timeout_holder CSOT timeout. # # @return [ Crypt::KMS::Credentials ] Credentials for the configured # KMS providers. - def retrieve_kms_credentials(context) + def retrieve_kms_credentials(timeout_holder) providers = {} if kms_providers.aws&.empty? begin - aws_credentials = Mongo::Auth::Aws::CredentialsRetriever.new.credentials(context) + aws_credentials = Mongo::Auth::Aws::CredentialsRetriever.new.credentials(timeout_holder) rescue Auth::Aws::CredentialsNotFound raise Error::CryptError.new( "Could not locate AWS credentials (checked environment variables, ECS and EC2 metadata)" @@ -170,10 +168,10 @@ def retrieve_kms_credentials(context) providers[:aws] = aws_credentials.to_h end if kms_providers.gcp&.empty? - providers[:gcp] = { access_token: gcp_access_token } + providers[:gcp] = { access_token: gcp_access_token(timeout_holder) } end if kms_providers.azure&.empty? - providers[:azure] = { access_token: azure_access_token } + providers[:azure] = { access_token: azure_access_token(timeout_holder) } end KMS::Credentials.new(providers) end @@ -183,8 +181,8 @@ def retrieve_kms_credentials(context) # @return [ String ] A GCP access token. # # @raise [ Error::CryptError ] If the GCP access token could not be - def gcp_access_token - KMS::GCP::CredentialsRetriever.fetch_access_token + def gcp_access_token(timeout_holder) + KMS::GCP::CredentialsRetriever.fetch_access_token(timeout_holder) rescue KMS::CredentialsNotFound => e raise Error::CryptError.new( "Could not locate GCP credentials: #{e.class}: #{e.message}" @@ -197,9 +195,9 @@ def gcp_access_token # # @raise [ Error::CryptError ] If the Azure access token could not be # retrieved. 
- def azure_access_token + def azure_access_token(timeout_holder) if @cached_azure_token.nil? || @cached_azure_token.expired? - @cached_azure_token = KMS::Azure::CredentialsRetriever.fetch_access_token + @cached_azure_token = KMS::Azure::CredentialsRetriever.fetch_access_token(timeout_holder: timeout_holder) end @cached_azure_token.access_token rescue KMS::CredentialsNotFound => e diff --git a/lib/mongo/crypt/explicit_encrypter.rb b/lib/mongo/crypt/explicit_encrypter.rb index cf38f51d5b..ff45bb2d91 100644 --- a/lib/mongo/crypt/explicit_encrypter.rb +++ b/lib/mongo/crypt/explicit_encrypter.rb @@ -339,8 +339,8 @@ def updates_from_data_key_documents(documents) end end - def context - Operation::Context.new( + def timeout_holder + CsotTimeoutHolder.new( operation_timeouts: { operation_timeout_ms: @timeout_ms } diff --git a/lib/mongo/crypt/kms/azure/credentials_retriever.rb b/lib/mongo/crypt/kms/azure/credentials_retriever.rb index b2e1578b1c..c1b6898fa9 100644 --- a/lib/mongo/crypt/kms/azure/credentials_retriever.rb +++ b/lib/mongo/crypt/kms/azure/credentials_retriever.rb @@ -34,17 +34,16 @@ class CredentialsRetriever # request. This is used for testing. # @param [String | nil] metadata_host Azure metadata host. This # is used for testing. - # @param [ Operation::Context | nil ] context Context of the operation - # access token is fetched for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [ KMS::Azure::AccessToken ] Azure access token. # # @raise [KMS::CredentialsNotFound] If credentials could not be found. # @raise Error::TimeoutError if credentials cannot be retrieved within - # the timeout defined on the operation context. - def self.fetch_access_token(extra_headers: {}, metadata_host: nil, context: nil) + # the timeout. + def self.fetch_access_token(extra_headers: {}, metadata_host: nil, timeout_holder: nil) uri, req = prepare_request(extra_headers, metadata_host) - parsed_response = fetch_response(uri, req, context) + parsed_response = fetch_response(uri, req, timeout_holder) Azure::AccessToken.new( parsed_response.fetch('access_token'), Integer(parsed_response.fetch('expires_in')) @@ -82,17 +81,16 @@ def self.prepare_request(extra_headers, metadata_host) # # @param [URI] uri URI to Azure metadata host. # @param [Net::HTTP::Get] req Request object. - # @param [ Operation::Context | nil ] context Context of the operation - # access token is fetched for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [Hash] Parsed response. # # @raise [KMS::CredentialsNotFound] If cannot fetch response or # response is invalid. # @raise Error::TimeoutError if credentials cannot be retrieved within - # the timeout defined on the operation context. - def self.fetch_response(uri, req, context) - resp = do_request(uri, req, context) + # the timeout. + def self.fetch_response(uri, req, timeout_holder) + resp = do_request(uri, req, timeout_holder) if resp.code != '200' raise KMS::CredentialsNotFound, "Azure metadata host responded with code #{resp.code}" @@ -108,18 +106,17 @@ def self.fetch_response(uri, req, context) # # @param [URI] uri URI to Azure metadata host. # @param [Net::HTTP::Get] req Request object. - # @param [ Operation::Context | nil ] context Context of the operation - # access token is fetched for. + # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout. # # @return [Net::HTTPResponse] Response object. # # @raise [KMS::CredentialsNotFound] If cannot execute request. 
      # @raise Error::TimeoutError if credentials cannot be retrieved within
-      #   the timeout defined on the operation context.
-      def self.do_request(uri, req, context)
-        context&.check_timeout!
-        timeout = context&.remaining_timeout_sec || 10
-        exception_class = if context&.csot?
+      #   the timeout.
+      def self.do_request(uri, req, timeout_holder)
+        timeout_holder&.check_timeout!
+        timeout = timeout_holder&.remaining_timeout_sec || 10
+        exception_class = if timeout_holder&.csot?
                             Error::TimeoutError
                           else
                             nil
diff --git a/lib/mongo/crypt/kms/gcp/credentials_retriever.rb b/lib/mongo/crypt/kms/gcp/credentials_retriever.rb
index 14d75c2ad5..3f94bca6a6 100644
--- a/lib/mongo/crypt/kms/gcp/credentials_retriever.rb
+++ b/lib/mongo/crypt/kms/gcp/credentials_retriever.rb
@@ -31,19 +31,18 @@ class CredentialsRetriever
       # Fetch GCP access token.
       #
-      # @param [ Operation::Context ] context Context of the operation the access
-      #   token is fetched for.
+      # @param [ CsotTimeoutHolder | nil ] timeout_holder CSOT timeout.
       #
       # @return [ String ] GCP access token.
       #
       # @raise [ KMS::CredentialsNotFound ]
       # @raise [ Error::TimeoutError ]
-      def self.fetch_access_token(context = nil)
+      def self.fetch_access_token(timeout_holder = nil)
         host = ENV.fetch(METADATA_HOST_ENV) { DEFAULT_HOST }
         uri = URI("http://#{host}/computeMetadata/v1/instance/service-accounts/default/token")
         req = Net::HTTP::Get.new(uri)
         req['Metadata-Flavor'] = 'Google'
-        resp = fetch_response(uri, req, context)
+        resp = fetch_response(uri, req, timeout_holder)
         if resp.code != '200'
           raise KMS::CredentialsNotFound,
                 "GCE metadata host responded with code #{resp.code}"
@@ -58,10 +57,10 @@ def self.fetch_access_token(context = nil)
               "Could not receive GCP metadata response; #{e.class}: #{e.message}"
       end

-      def self.fetch_response(uri, req, context)
-        context&.check_timeout!
-        if context&.has_timeout?
-          ::Timeout.timeout(context.remaining_timeout_sec, Error::TimeoutError) do
+      def self.fetch_response(uri, req, timeout_holder)
+        timeout_holder&.check_timeout!
+        if timeout_holder&.has_timeout?
+          ::Timeout.timeout(timeout_holder.remaining_timeout_sec, Error::TimeoutError) do
             do_fetch(uri, req)
           end
         else

From 0069a48fffb8360ec0b34ef9aae72af701b6da2d Mon Sep 17 00:00:00 2001
From: Dmitry Rybakov
Date: Mon, 13 May 2024 16:58:58 +0200
Subject: [PATCH 3/7] Context

---
 lib/mongo/crypt/explicit_encrypter.rb | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/mongo/crypt/explicit_encrypter.rb b/lib/mongo/crypt/explicit_encrypter.rb
index ff45bb2d91..b90a837b0f 100644
--- a/lib/mongo/crypt/explicit_encrypter.rb
+++ b/lib/mongo/crypt/explicit_encrypter.rb
@@ -74,10 +74,10 @@ def create_and_insert_data_key(master_key_document, key_alt_names, key_material
         master_key_document,
         key_alt_names,
         key_material
-      ).run_state_machine(context)
+      ).run_state_machine(timeout_holder)

       @encryption_io.insert_data_key(
-        data_key_document, timeout_ms: context.remaining_timeout_ms
+        data_key_document, timeout_ms: timeout_holder.remaining_timeout_ms!
       ).inserted_id
     end

@@ -116,7 +116,7 @@ def encrypt(value, options)
         @encryption_io,
         { v: value },
         options
-      ).run_state_machine(context)['v']
+      ).run_state_machine(timeout_holder)['v']
     end

     # Encrypts a Match Expression or Aggregate Expression to query a range index.
@@ -175,7 +175,7 @@ def encrypt_expression(expression, options)
         @encryption_io,
         { v: expression },
         options
-      ).run_state_machine(context)['v']
+      ).run_state_machine(timeout_holder)['v']
     end

     # Decrypts a value that has already been encrypted
@@ -189,7 +189,7 @@ def decrypt(value)
         @crypt_handle,
         @encryption_io,
         { v: value }
-      ).run_state_machine(context)['v']
+      ).run_state_machine(timeout_holder)['v']
     end

     # Adds a key_alt_name for the key in the key vault collection with the given id.
@@ -275,7 +275,7 @@ def rewrap_many_data_key(filter, opts = {})
         @encryption_io,
         filter,
         master_key_document
-      ).run_state_machine(context)
+      ).run_state_machine(timeout_holder)

       return RewrapManyDataKeyResult.new(nil) if rewrap_result.nil?

From af8c6679a408f0484be71d17cfc0e4e5ed5a9f05 Mon Sep 17 00:00:00 2001
From: Dmitry Rybakov
Date: Mon, 13 May 2024 17:25:10 +0200
Subject: [PATCH 4/7] wip

---
 lib/mongo/auth/aws/credentials_retriever.rb                  | 4 ++--
 .../client_side_encryption/on_demand_aws_credentials_spec.rb | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/mongo/auth/aws/credentials_retriever.rb b/lib/mongo/auth/aws/credentials_retriever.rb
index e8250fe9de..7ab83750d5 100644
--- a/lib/mongo/auth/aws/credentials_retriever.rb
+++ b/lib/mongo/auth/aws/credentials_retriever.rb
@@ -158,7 +158,7 @@ def obtain_credentials_from_endpoints(timeout_holder = nil)
      # @return [ Auth::Aws::Credentials | nil ] A set of credentials, or nil
      #   if retrieval failed.
      # @raise Error::TimeoutError if credentials cannot be retrieved within
-      #   the timeout defined on the operation context.
+      #   the timeout.
      def ec2_metadata_credentials(timeout_holder = nil)
        timeout_holder&.check_timeout!
        http = Net::HTTP.new('169.254.169.254')
@@ -180,7 +180,7 @@ def ec2_metadata_credentials(timeout_holder = nil)
        end
        role_name = resp.body
        escaped_role_name = CGI.escape(role_name).gsub('+', '%20')
-        resp = with_timeout(context) do
+        resp = with_timeout(timeout_holder) do
          http_get(http, "/latest/meta-data/iam/security-credentials/#{escaped_role_name}", metadata_token)
        end
        if resp.code != '200'
diff --git a/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb b/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb
index abb6933d16..93a400d6a8 100644
--- a/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb
+++ b/spec/integration/client_side_encryption/on_demand_aws_credentials_spec.rb
@@ -37,7 +37,7 @@
   it 'raises an error' do
     expect_any_instance_of(
       Mongo::Auth::Aws::CredentialsRetriever
-    ).to receive(:credentials).with(kind_of(Mongo::Operation::Context)).once.and_raise(
+    ).to receive(:credentials).with(kind_of(Mongo::CsotTimeoutHolder)).once.and_raise(
       Mongo::Auth::Aws::CredentialsNotFound
     )

From b3e50204f75396dc1677426f4621c6c3381f5478 Mon Sep 17 00:00:00 2001
From: Dmitry Rybakov
Date: Tue, 14 May 2024 09:15:07 +0200
Subject: [PATCH 5/7] Fix rubocop offence

---
 .../crypt/kms/gcp/credentials_retriever.rb    |  2 +-
 lib/mongo/csot_timeout_holder.rb              | 62 ++++++++++---------
 lib/mongo/operation/shared/timed.rb           |  2 +-
 spec/mongo/operation/shared/csot/examples.rb  |  2 +-
 spec/runners/unified/ambiguous_operations.rb  |  3 +-
 5 files changed, 37 insertions(+), 34 deletions(-)

diff --git a/lib/mongo/crypt/kms/gcp/credentials_retriever.rb b/lib/mongo/crypt/kms/gcp/credentials_retriever.rb
index 3f94bca6a6..bbb2887fbd 100644
--- a/lib/mongo/crypt/kms/gcp/credentials_retriever.rb
+++ b/lib/mongo/crypt/kms/gcp/credentials_retriever.rb
@@ -59,7 +59,7 @@ def self.fetch_access_token(timeout_holder = nil)
       def self.fetch_response(uri, req, timeout_holder)
         timeout_holder&.check_timeout!
-        if timeout_holder&.has_timeout?
+        if timeout_holder&.timeout?
           ::Timeout.timeout(timeout_holder.remaining_timeout_sec, Error::TimeoutError) do
             do_fetch(uri, req)
           end
         else
diff --git a/lib/mongo/csot_timeout_holder.rb b/lib/mongo/csot_timeout_holder.rb
index 4d09a485af..949f04ac23 100644
--- a/lib/mongo/csot_timeout_holder.rb
+++ b/lib/mongo/csot_timeout_holder.rb
@@ -15,16 +15,17 @@
 # limitations under the License.

 module Mongo
+  # This class stores operation timeout and provides corresponding helper methods.
+  #
+  # @api private
   class CsotTimeoutHolder
     def initialize(session: nil, operation_timeouts: {})
       @deadline = calculate_deadline(operation_timeouts, session)
       @operation_timeouts = operation_timeouts
-      @timeout_sec = if @deadline then @deadline - Utils.monotonic_time end
+      @timeout_sec = (@deadline - Utils.monotonic_time if @deadline)
     end

-    attr_reader :deadline
-    attr_reader :timeout_sec
-    attr_reader :operation_timeouts
+    attr_reader :deadline, :timeout_sec, :operation_timeouts

     # @return [ true | false ] Whether CSOT is enabled for the operation
     def csot?
@@ -33,15 +34,15 @@ def csot?

     # @return [ true | false ] Returns false if CSOT is not enabled, or if
     #   CSOT is set to 0 (means unlimited), otherwise true.
-    def has_timeout?
-      ![nil, 0].include?(@deadline)
+    def timeout?
+      ![ nil, 0 ].include?(@deadline)
     end

     # @return [ Float | nil ] Returns the remaining seconds of the timeout
     #   set for the operation; if no timeout is set, or the timeout is 0
     #   (means unlimited) returns nil.
     def remaining_timeout_sec
-      return nil unless has_timeout?
+      return nil unless timeout?

       deadline - Utils.monotonic_time
     end
@@ -69,7 +70,7 @@ def remaining_timeout_ms!
     # @return [ true | false ] Whether the timeout for the operation expired.
     #   If no timeout set, this method returns false.
     def timeout_expired?
-      if has_timeout?
+      if timeout?
         Utils.monotonic_time >= deadline
       else
         false
@@ -81,34 +82,37 @@ def timeout_expired?
     # Check whether the operation timeout expired, and raises an appropriate
     # error if yes.
     #
     # @raise [ Error::TimeoutError ]
     def check_timeout!
-      if timeout_expired?
-        raise Error::TimeoutError, "Operation took more than #{timeout_sec} seconds"
-      end
+      return unless timeout_expired?
+ + raise Error::TimeoutError, "Operation took more than #{timeout_sec} seconds" end private def calculate_deadline(opts = {}, session = nil) - if opts[:operation_timeout_ms] && session&.with_transaction_deadline - raise ArgumentError, 'Cannot override timeout_ms inside with_transaction block' + check_no_override_inside_transaction!(opts, session) + return session&.with_transaction_deadline if session&.with_transaction_deadline + + if (operation_timeout_ms = opts[:operation_timeout_ms]) + calculate_deadline_from_opertation_timeout(operation_timeout_ms) + elsif (inherited_timeout_ms = opts[:inherited_timeout_ms]) + calculate_deadline_from_timeout_ms(inherited_timeout_ms) end + end + + def check_no_override_inside_transaction!(opts, session) + return unless opts[:operation_timeout_ms] && session&.with_transaction_deadline + + raise ArgumentError, 'Cannot override timeout_ms inside with_transaction block' + end - if session&.with_transaction_deadline - session&.with_transaction_deadline - elsif operation_timeout_ms = opts[:operation_timeout_ms] - if operation_timeout_ms > 0 - Utils.monotonic_time + (operation_timeout_ms / 1_000.0) - elsif operation_timeout_ms == 0 - 0 - elsif operation_timeout_ms < 0 - raise ArgumentError, "timeout_ms must be a non-negative integer but #{operation_timeout_ms} given" - end - elsif inherited_timeout_ms = opts[:inherited_timeout_ms] - if inherited_timeout_ms > 0 - Utils.monotonic_time + (inherited_timeout_ms / 1_000.0) - elsif inherited_timeout_ms == 0 - 0 - end + def calculate_deadline_from_timeout_ms(operation_timeout_ms) + if operation_timeout_ms.positive? + Utils.monotonic_time + (operation_timeout_ms / 1_000.0) + elsif operation_timeout_ms.zero? + 0 + elsif operation_timeout_ms.negative? + raise ArgumentError, "timeout_ms must be a non-negative integer but #{operation_timeout_ms} given" end end end diff --git a/lib/mongo/operation/shared/timed.rb b/lib/mongo/operation/shared/timed.rb index 8d048344cf..a023e0a3a8 100644 --- a/lib/mongo/operation/shared/timed.rb +++ b/lib/mongo/operation/shared/timed.rb @@ -38,7 +38,7 @@ def apply_relevant_timeouts_to(spec, connection) # @return [ Hash ] the result of yielding to the block (which must be # a Hash) def with_max_time(connection) - if context&.has_timeout? + if context&.timeout? max_time_sec = context.remaining_timeout_sec - connection.server.minimum_round_trip_time raise Mongo::Error::TimeoutError if max_time_sec <= 0 diff --git a/spec/mongo/operation/shared/csot/examples.rb b/spec/mongo/operation/shared/csot/examples.rb index 439c369775..24c43a43d1 100644 --- a/spec/mongo/operation/shared/csot/examples.rb +++ b/spec/mongo/operation/shared/csot/examples.rb @@ -33,7 +33,7 @@ def self.included(example_context) let(:context) do Mongo::Operation::Context.new(view: view).tap do |context| allow(context).to receive(:remaining_timeout_sec).and_return(remaining_timeout_sec) - allow(context).to receive(:has_timeout?).and_return(!remaining_timeout_sec.nil?) + allow(context).to receive(:timeout?).and_return(!remaining_timeout_sec.nil?) 
end end diff --git a/spec/runners/unified/ambiguous_operations.rb b/spec/runners/unified/ambiguous_operations.rb index 80356e2584..c83364aa0f 100644 --- a/spec/runners/unified/ambiguous_operations.rb +++ b/spec/runners/unified/ambiguous_operations.rb @@ -2,11 +2,10 @@ module Unified module AmbiguousOperations - def find(op) entities.get(:collection, op['object']) crud_find(op) - rescue Unified::Error::EntityMissing => e + rescue Unified::Error::EntityMissing entities.get(:bucket, op['object']) gridfs_find(op) end From df26c62bc2203676ba78cacc093b4dd0cca5c352 Mon Sep 17 00:00:00 2001 From: Dmitry Rybakov Date: Tue, 14 May 2024 09:30:10 +0200 Subject: [PATCH 6/7] 3379 --- lib/mongo/csot_timeout_holder.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/mongo/csot_timeout_holder.rb b/lib/mongo/csot_timeout_holder.rb index 949f04ac23..9d7d15c0a0 100644 --- a/lib/mongo/csot_timeout_holder.rb +++ b/lib/mongo/csot_timeout_holder.rb @@ -94,7 +94,7 @@ def calculate_deadline(opts = {}, session = nil) return session&.with_transaction_deadline if session&.with_transaction_deadline if (operation_timeout_ms = opts[:operation_timeout_ms]) - calculate_deadline_from_opertation_timeout(operation_timeout_ms) + calculate_deadline_from_timeout_ms(operation_timeout_ms) elsif (inherited_timeout_ms = opts[:inherited_timeout_ms]) calculate_deadline_from_timeout_ms(inherited_timeout_ms) end From db444ce5dbad89d9bda533938c93c9c335b60e31 Mon Sep 17 00:00:00 2001 From: Dmitry Rybakov Date: Thu, 16 May 2024 12:46:42 +0200 Subject: [PATCH 7/7] Fix code review remarks --- spec/runners/unified/test.rb | 1 - 1 file changed, 1 deletion(-) diff --git a/spec/runners/unified/test.rb b/spec/runners/unified/test.rb index 000c8383fc..1715575e10 100644 --- a/spec/runners/unified/test.rb +++ b/spec/runners/unified/test.rb @@ -415,7 +415,6 @@ def execute_operation(op) rescue Mongo::Error, bson_error, Mongo::Auth::Unauthorized, ArgumentError => e if expected_error.use('isTimeoutError') unless Mongo::Error::TimeoutError === e - raise e raise Error::ErrorMismatch, %Q,Expected TimeoutError ("isTimeoutError") but got #{e}, end end
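
Reviewer note (illustrative only, not part of the patch): below is a minimal usage
sketch of the CsotTimeoutHolder API as it stands after PATCH 6/7, relying only on
methods visible in the diffs above. The class is marked @api private, so this is
not supported public API, and the 200 ms budget is an arbitrary example value.

    require 'mongo'

    # operation_timeout_ms: 0 would mean "unlimited"; omitting the option
    # entirely leaves CSOT disabled (csot? => false, remaining_* => nil).
    holder = Mongo::CsotTimeoutHolder.new(
      operation_timeouts: { operation_timeout_ms: 200 }
    )

    holder.csot?                # => true, a deadline was computed
    holder.timeout?             # => true, the deadline is neither nil nor 0
    holder.remaining_timeout_ms # => positive Integer, counting down against
                                #    a monotonic clock

    sleep 0.3 # let the 200 ms budget lapse

    holder.timeout_expired?     # => true
    begin
      holder.check_timeout!     # raises once the deadline has passed
    rescue Mongo::Error::TimeoutError => e
      puts e.message            # => "Operation took more than 0.2 seconds"
    end

The bang variants (remaining_timeout_ms!, remaining_timeout_sec!) combine the same
check_timeout! guard with the lookup, which is why run_state_machine above can drop
its separate check_timeout! call.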