diff --git a/internal/integration/json_helpers_test.go b/internal/integration/json_helpers_test.go index fbd71083d7..3eee697d18 100644 --- a/internal/integration/json_helpers_test.go +++ b/internal/integration/json_helpers_test.go @@ -111,6 +111,8 @@ func createClientOptions(t testing.TB, opts bson.Raw) *options.ClientOptions { case "serverSelectionTimeoutMS": sst := convertValueToMilliseconds(t, opt) clientOpts.SetServerSelectionTimeout(sst) + case "timeoutMS": + clientOpts.SetTimeout(time.Duration(opt.Int32()) * time.Millisecond) case "minPoolSize": clientOpts.SetMinPoolSize(uint64(opt.AsInt64())) case "maxPoolSize": @@ -454,8 +456,8 @@ func errorFromResult(t testing.TB, result interface{}) *operationError { if err != nil { return nil } - if expected.ErrorCodeName == nil && expected.ErrorContains == nil && len(expected.ErrorLabelsOmit) == 0 && - len(expected.ErrorLabelsContain) == 0 { + if expected.ErrorCodeName == nil && expected.ErrorContains == nil && expected.IsTimeoutError == nil && + len(expected.ErrorLabelsOmit) == 0 && len(expected.ErrorLabelsContain) == 0 { return nil } @@ -547,6 +549,13 @@ func verifyError(expected *operationError, actual error) error { return fmt.Errorf("expected error %w to not contain label %q", actual, label) } } + if expected.IsTimeoutError != nil { + isTimeoutError := mongo.IsTimeout(actual) + if *expected.IsTimeoutError != isTimeoutError { + return fmt.Errorf("expected error %w to be a timeout error: %v, is timeout error: %v", + actual, *expected.IsTimeoutError, isTimeoutError) + } + } return nil } diff --git a/internal/integration/mtest/mongotest.go b/internal/integration/mtest/mongotest.go index 3967bf7f82..908b5cccf1 100644 --- a/internal/integration/mtest/mongotest.go +++ b/internal/integration/mtest/mongotest.go @@ -49,6 +49,11 @@ const ( namespaceExistsErrCode int32 = 48 ) +type failPoint struct { + name string + client *mongo.Client +} + // T is a wrapper around testing.T. type T struct { // connsCheckedOut is the net number of connections checked out during test execution. @@ -68,7 +73,8 @@ type T struct { createdColls []*Collection // collections created in this test proxyDialer *proxyDialer dbName, collName string - failPointNames []string + hasFailPoint bool + failPoints []failPoint minServerVersion string maxServerVersion string validTopologies []TopologyKind @@ -166,7 +172,14 @@ func (t *T) cleanup() { // always disconnect the client regardless of clientType because Client.Disconnect will work against // all deployments - _ = t.Client.Disconnect(context.Background()) + if !t.hasFailPoint { + _ = t.Client.Disconnect(context.Background()) + } + for _, fp := range t.failPoints { + _ = fp.client.Disconnect(context.Background()) + } + t.hasFailPoint = false + t.failPoints = t.failPoints[:0] } // Run creates a new T instance for a sub-test and runs the given callback. 
It also creates a new collection using the @@ -220,7 +233,7 @@ func (t *T) RunOpts(name string, opts *Options, callback func(mt *T)) { sub.ClearCollections() } // only disconnect client if it's not being shared - if sub.shareClient == nil || !*sub.shareClient { + if (sub.shareClient == nil || !*sub.shareClient) && !sub.hasFailPoint { _ = sub.Client.Disconnect(context.Background()) } assert.Equal(sub, 0, sessions, "%v sessions checked out", sessions) @@ -364,7 +377,10 @@ func (t *T) ResetClient(opts *options.ClientOptions) { t.clientOpts = opts } - _ = t.Client.Disconnect(context.Background()) + if !t.hasFailPoint { + _ = t.Client.Disconnect(context.Background()) + } + t.hasFailPoint = false t.createTestClient() t.DB = t.Client.Database(t.dbName) t.Coll = t.DB.Collection(t.collName, t.collOpts) @@ -523,7 +539,8 @@ func (t *T) SetFailPoint(fp failpoint.FailPoint) { if err := SetFailPoint(fp, t.Client); err != nil { t.Fatal(err) } - t.failPointNames = append(t.failPointNames, fp.ConfigureFailPoint) + t.hasFailPoint = true + t.failPoints = append(t.failPoints, failPoint{name: fp.ConfigureFailPoint, client: t.Client}) } // SetFailPointFromDocument sets the fail point represented by the given document for the client associated with T. This @@ -536,29 +553,34 @@ func (t *T) SetFailPointFromDocument(fp bson.Raw) { } name := fp.Index(0).Value().StringValue() - t.failPointNames = append(t.failPointNames, name) + t.hasFailPoint = true + t.failPoints = append(t.failPoints, failPoint{name: name, client: t.Client}) } // TrackFailPoint adds the given fail point to the list of fail points to be disabled when the current test finishes. // This function does not create a fail point on the server. func (t *T) TrackFailPoint(fpName string) { - t.failPointNames = append(t.failPointNames, fpName) + t.hasFailPoint = true + t.failPoints = append(t.failPoints, failPoint{name: fpName, client: t.Client}) } // ClearFailPoints disables all previously set failpoints for this test. func (t *T) ClearFailPoints() { - db := t.Client.Database("admin") - for _, fp := range t.failPointNames { + for _, fp := range t.failPoints { cmd := failpoint.FailPoint{ - ConfigureFailPoint: fp, + ConfigureFailPoint: fp.name, Mode: failpoint.ModeOff, } - err := db.RunCommand(context.Background(), cmd).Err() + err := fp.client.Database("admin").RunCommand(context.Background(), cmd).Err() if err != nil { - t.Fatalf("error clearing fail point %s: %v", fp, err) + t.Fatalf("error clearing fail point %s: %v", fp.name, err) + } + if fp.client != t.Client { + _ = fp.client.Disconnect(context.Background()) } } - t.failPointNames = t.failPointNames[:0] + t.hasFailPoint = false + t.failPoints = t.failPoints[:0] } // CloneDatabase modifies the default database for this test to match the given options. 
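Reviewer note (not part of the patch): the mtest changes above pair each fail point with the client that configured it and defer disconnecting any such client until the fail point has been disabled through that same client. The sketch below illustrates the underlying server interaction using only the public driver API and the failCommand document shape from the new test data. The package name, the demoFailPointCleanup helper, and the v2 import paths are assumptions for illustration, not code proposed for the change.

package sketch

import (
	"context"

	"go.mongodb.org/mongo-driver/v2/bson"
	"go.mongodb.org/mongo-driver/v2/mongo"
)

// demoFailPointCleanup is a hypothetical helper, not part of mtest. It configures a
// failCommand fail point that blocks a single "find" for 60ms (the same document shape
// used in the timeoutMS test data), then disables it through the same client before
// disconnecting, which is the ordering ClearFailPoints now guarantees per fp.client.
func demoFailPointCleanup(ctx context.Context, client *mongo.Client) error {
	admin := client.Database("admin")

	set := bson.D{
		{Key: "configureFailPoint", Value: "failCommand"},
		{Key: "mode", Value: bson.D{{Key: "times", Value: 1}}},
		{Key: "data", Value: bson.D{
			{Key: "failCommands", Value: bson.A{"find"}},
			{Key: "blockConnection", Value: true},
			{Key: "blockTimeMS", Value: 60},
		}},
	}
	if err := admin.RunCommand(ctx, set).Err(); err != nil {
		return err
	}

	// ... the operation under test would run (and block) here ...

	// A fail point can only be disabled through a connected client, so disconnecting
	// before this command would leave it active on the server.
	off := bson.D{
		{Key: "configureFailPoint", Value: "failCommand"},
		{Key: "mode", Value: "off"},
	}
	if err := admin.RunCommand(ctx, off).Err(); err != nil {
		return err
	}
	return client.Disconnect(ctx)
}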
diff --git a/internal/integration/unified_spec_test.go b/internal/integration/unified_spec_test.go index 11c7974666..0e9e9031e5 100644 --- a/internal/integration/unified_spec_test.go +++ b/internal/integration/unified_spec_test.go @@ -155,6 +155,7 @@ type operationError struct { ErrorCodeName *string `bson:"errorCodeName"` ErrorLabelsContain []string `bson:"errorLabelsContain"` ErrorLabelsOmit []string `bson:"errorLabelsOmit"` + IsTimeoutError *bool `bson:"isTimeoutError"` } var directories = []string{ diff --git a/internal/spectest/skip.go b/internal/spectest/skip.go index c3c5d04fe3..4deada70ed 100644 --- a/internal/spectest/skip.go +++ b/internal/spectest/skip.go @@ -404,13 +404,6 @@ var skipTests = map[string][]string{ "TestBsonBinaryVectorSpec/Tests_of_Binary_subtype_9,_Vectors,_with_dtype_FLOAT32/Insufficient_vector_data_with_5_bytes_FLOAT32/Marshaling", }, - // TODO(GODRIVER-3521): Extend Legacy Unified Spec Runner to include - // client-side-encryption timeoutMS. - "Extend Legacy Unified Spec Runner for client-side-encryption timeoutMS (GODRIVER-3521)": { - "TestClientSideEncryptionSpec/timeoutMS.json/remaining_timeoutMS_applied_to_find_to_get_keyvault_data", - "TestClientSideEncryptionSpec/timeoutMS.json/timeoutMS_applied_to_listCollections_to_get_collection_schema", - }, - // TODO(GODRIVER-3486): Support auto encryption in unified tests. "Support auto encryption in unified tests (GODRIVER-3486)": { "TestUnifiedSpec/unified-test-format/tests/valid-pass/poc-queryable-encryption.json/insert,_replace,_and_find_with_queryable_encryption", diff --git a/testdata/client-side-encryption/legacy/timeoutMS.json b/testdata/client-side-encryption/legacy/timeoutMS.json new file mode 100644 index 0000000000..9f24dd5663 --- /dev/null +++ b/testdata/client-side-encryption/legacy/timeoutMS.json @@ -0,0 +1,200 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "cse-timeouts-db", + "collection_name": "cse-timeouts-coll", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] +} \ No newline at end of file diff --git a/testdata/client-side-encryption/legacy/timeoutMS.yml b/testdata/client-side-encryption/legacy/timeoutMS.yml new file mode 100644 index 0000000000..bb71d67650 --- /dev/null +++ b/testdata/client-side-encryption/legacy/timeoutMS.yml @@ -0,0 +1,67 @@ +runOn: + - minServerVersion: "4.4" +database_name: &database_name "cse-timeouts-db" +collection_name: &collection_name "cse-timeouts-coll" + +data: [] +json_schema: {'properties': {'encrypted_w_altname': {'encrypt': {'keyId': '/altname', 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Random'}}, 'encrypted_string': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'}}, 'random': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Random'}}, 'encrypted_string_equivalent': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'}}}, 'bsonType': 'object'} +key_vault_data: [{'status': 1, '_id': {'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}, 'masterKey': {'provider': 'aws', 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', 'region': 'us-east-1'}, 'updateDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyMaterial': {'$binary': {'base64': 
'AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyAltNames': ['altname', 'another_altname']}] + +tests: + - description: "timeoutMS applied to listCollections to get collection schema" + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 60 + clientOptions: + autoEncryptOpts: + kmsProviders: + aws: {} # Credentials filled in from environment. + timeoutMS: 50 + operations: + - name: insertOne + arguments: + document: &doc0 { _id: 1, encrypted_string: "string0", random: "abc" } + result: + isTimeoutError: true + expectations: + # Auto encryption will request the collection info. + - command_started_event: + command: + listCollections: 1 + filter: + name: *collection_name + maxTimeMS: { $$type: ["int", "long"] } + command_name: listCollections + + # Test that timeoutMS applies to the sum of all operations done for client-side encryption. This is done by blocking + # listCollections and find for 30ms each and running an insertOne with timeoutMS=50. There should be one + # listCollections command and one "find" command, so the sum should take more than timeoutMS. A second listCollections + # event doesn't occur due to the internal MongoClient lacking configured auto encryption, plus libmongocrypt holds the + # collection schema in cache for a minute. + # + # This test does not include command monitoring expectations because the exact command sequence is dependent on the + # amount of time taken by mongocryptd communication. In slow runs, mongocryptd communication can breach the timeout + # and result in the final "find" not being sent. + - description: "remaining timeoutMS applied to find to get keyvault data" + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections", "find"] + blockConnection: true + blockTimeMS: 30 + clientOptions: + autoEncryptOpts: + kmsProviders: + aws: {} # Credentials filled in from environment. + timeoutMS: 50 + operations: + - name: insertOne + arguments: + document: *doc0 + result: + isTimeoutError: true \ No newline at end of file
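Reviewer note (not part of the patch): taken together, the runner changes map the test file's clientOptions.timeoutMS to ClientOptions.SetTimeout and the result.isTimeoutError assertion to mongo.IsTimeout, as in verifyError above. The sketch below shows that mapping in isolation against the cse-timeouts-db.cse-timeouts-coll namespace from the new test data; it omits the autoEncryptOpts the real tests configure. The package name, the runTimeoutAssertion helper, its uri parameter, the hard-coded 50ms value, and the v2 mongo.Connect signature (no context argument) are assumptions for illustration only.

package sketch

import (
	"context"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/v2/bson"
	"go.mongodb.org/mongo-driver/v2/mongo"
	"go.mongodb.org/mongo-driver/v2/mongo/options"
)

// runTimeoutAssertion is a hypothetical helper, not part of the runner. It applies a
// 50ms client-wide timeout (the runner does this via clientOpts.SetTimeout when it
// sees "timeoutMS"), runs an insert against the namespace used by the test file, and
// checks the outcome the way verifyError handles "isTimeoutError": via mongo.IsTimeout.
func runTimeoutAssertion(ctx context.Context, uri string, expectTimeout bool) error {
	opts := options.Client().ApplyURI(uri).SetTimeout(50 * time.Millisecond)
	client, err := mongo.Connect(opts)
	if err != nil {
		return err
	}
	defer func() { _ = client.Disconnect(ctx) }()

	coll := client.Database("cse-timeouts-db").Collection("cse-timeouts-coll")
	_, err = coll.InsertOne(ctx, bson.D{
		{Key: "_id", Value: 1},
		{Key: "encrypted_string", Value: "string0"},
	})

	// Mirrors the new IsTimeoutError branch in verifyError: compare the expected flag
	// against whether the returned error is a timeout.
	if got := mongo.IsTimeout(err); got != expectTimeout {
		return fmt.Errorf("expected isTimeoutError=%v, got %v (err: %v)", expectTimeout, got, err)
	}
	return nil
}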