Skip to content

Commit e87f4dd

Browse files
RUST-1785 Make ExceededTimeLimit a read-retryable error (#997)
1 parent 8b22217 commit e87f4dd

File tree

4 files changed

+287
-2
lines changed

4 files changed

+287
-2
lines changed

src/error.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,8 @@ use crate::{bson::Document, options::ServerAddress, sdam::TopologyVersion};
1616
const RECOVERING_CODES: [i32; 5] = [11600, 11602, 13436, 189, 91];
1717
const NOTWRITABLEPRIMARY_CODES: [i32; 3] = [10107, 13435, 10058];
1818
const SHUTTING_DOWN_CODES: [i32; 2] = [11600, 91];
19-
const RETRYABLE_READ_CODES: [i32; 12] = [
20-
11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 134,
19+
const RETRYABLE_READ_CODES: [i32; 13] = [
20+
11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 134, 262,
2121
];
2222
const RETRYABLE_WRITE_CODES: [i32; 12] = [
2323
11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 262,

src/test/spec/json/retryable-reads/README.rst

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -232,10 +232,80 @@ This test requires MongoDB 4.2.9+ for ``blockConnection`` support in the failpoi
232232

233233
9. Disable the failpoint.
234234

235+
Retrying Reads in a Sharded Cluster
236+
===================================
237+
238+
These tests will be used to ensure drivers properly retry reads on a different
239+
mongos.
240+
241+
Retryable Reads Are Retried on a Different mongos if One is Available
242+
---------------------------------------------------------------------
243+
244+
This test MUST be executed against a sharded cluster that has at least two
245+
mongos instances.
246+
247+
1. Ensure that a test is run against a sharded cluster that has at least two
248+
mongoses. If there are more than two mongoses in the cluster, pick two to
249+
test against.
250+
251+
2. Create a client per mongos using the direct connection, and configure the
252+
following fail points on each mongos::
253+
254+
{
255+
configureFailPoint: "failCommand",
256+
mode: { times: 1 },
257+
data: {
258+
failCommands: ["find"],
259+
errorCode: 6,
260+
closeConnection: true
261+
}
262+
}
263+
264+
3. Create a client with ``retryReads=true`` that connects to the cluster,
265+
providing the two selected mongoses as seeds.
266+
267+
4. Enable command monitoring, and execute a ``find`` command that is
268+
supposed to fail on both mongoses.
269+
270+
5. Assert that there were failed command events from each mongos.
271+
272+
6. Disable the fail points.
273+
274+
275+
Retryable Reads Are Retried on the Same mongos if No Others are Available
276+
-------------------------------------------------------------------------
277+
278+
1. Ensure that a test is run against a sharded cluster. If there are multiple
279+
mongoses in the cluster, pick one to test against.
280+
281+
2. Create a client that connects to the mongos using the direct connection,
282+
and configure the following fail point on the mongos::
283+
284+
{
285+
configureFailPoint: "failCommand",
286+
mode: { times: 1 },
287+
data: {
288+
failCommands: ["find"],
289+
errorCode: 6,
290+
closeConnection: true
291+
}
292+
}
293+
294+
3. Create a client with ``retryReads=true`` that connects to the cluster,
295+
providing the selected mongos as the seed.
296+
297+
4. Enable command monitoring, and execute a ``find`` command.
298+
299+
5. Assert that there was a failed command event and a successful command event.
300+
301+
6. Disable the fail point.
302+
235303

236304
Changelog
237305
=========
238306

307+
:2023-08-26: Add prose tests for retrying in a sharded cluster.
308+
239309
:2022-04-22: Clarifications to ``serverless`` and ``useMultipleMongoses``.
240310

241311
:2022-01-10: Create legacy and unified subdirectories for new unified tests
Lines changed: 147 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,147 @@
1+
{
2+
"description": "ExceededTimeLimit is a retryable read",
3+
"schemaVersion": "1.3",
4+
"runOnRequirements": [
5+
{
6+
"minServerVersion": "4.0",
7+
"topologies": [
8+
"single",
9+
"replicaset"
10+
]
11+
},
12+
{
13+
"minServerVersion": "4.1.7",
14+
"topologies": [
15+
"sharded",
16+
"load-balanced"
17+
]
18+
}
19+
],
20+
"createEntities": [
21+
{
22+
"client": {
23+
"id": "client0",
24+
"useMultipleMongoses": false,
25+
"observeEvents": [
26+
"commandStartedEvent"
27+
]
28+
}
29+
},
30+
{
31+
"database": {
32+
"id": "database0",
33+
"client": "client0",
34+
"databaseName": "retryable-reads-tests"
35+
}
36+
},
37+
{
38+
"collection": {
39+
"id": "collection0",
40+
"database": "database0",
41+
"collectionName": "exceededtimelimit-test"
42+
}
43+
}
44+
],
45+
"initialData": [
46+
{
47+
"collectionName": "exceededtimelimit-test",
48+
"databaseName": "retryable-reads-tests",
49+
"documents": [
50+
{
51+
"_id": 1,
52+
"x": 11
53+
},
54+
{
55+
"_id": 2,
56+
"x": 22
57+
},
58+
{
59+
"_id": 3,
60+
"x": 33
61+
}
62+
]
63+
}
64+
],
65+
"tests": [
66+
{
67+
"description": "Find succeeds on second attempt after ExceededTimeLimit",
68+
"operations": [
69+
{
70+
"name": "failPoint",
71+
"object": "testRunner",
72+
"arguments": {
73+
"client": "client0",
74+
"failPoint": {
75+
"configureFailPoint": "failCommand",
76+
"mode": {
77+
"times": 1
78+
},
79+
"data": {
80+
"failCommands": [
81+
"find"
82+
],
83+
"errorCode": 262
84+
}
85+
}
86+
}
87+
},
88+
{
89+
"name": "find",
90+
"arguments": {
91+
"filter": {
92+
"_id": {
93+
"$gt": 1
94+
}
95+
}
96+
},
97+
"object": "collection0",
98+
"expectResult": [
99+
{
100+
"_id": 2,
101+
"x": 22
102+
},
103+
{
104+
"_id": 3,
105+
"x": 33
106+
}
107+
]
108+
}
109+
],
110+
"expectEvents": [
111+
{
112+
"client": "client0",
113+
"events": [
114+
{
115+
"commandStartedEvent": {
116+
"command": {
117+
"find": "exceededtimelimit-test",
118+
"filter": {
119+
"_id": {
120+
"$gt": 1
121+
}
122+
}
123+
},
124+
"commandName": "find",
125+
"databaseName": "retryable-reads-tests"
126+
}
127+
},
128+
{
129+
"commandStartedEvent": {
130+
"command": {
131+
"find": "exceededtimelimit-test",
132+
"filter": {
133+
"_id": {
134+
"$gt": 1
135+
}
136+
}
137+
},
138+
"commandName": "find",
139+
"databaseName": "retryable-reads-tests"
140+
}
141+
}
142+
]
143+
}
144+
]
145+
}
146+
]
147+
}
Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
description: "ExceededTimeLimit is a retryable read"
2+
3+
schemaVersion: "1.3"
4+
5+
runOnRequirements:
6+
- minServerVersion: "4.0"
7+
topologies: [single, replicaset]
8+
- minServerVersion: "4.1.7"
9+
topologies: [sharded, load-balanced]
10+
11+
createEntities:
12+
- client:
13+
id: &client0 client0
14+
# Ensure the `configureFailpoint` and `find` commands are run on the same mongos
15+
useMultipleMongoses: false
16+
observeEvents: [ commandStartedEvent ]
17+
- database:
18+
id: &database0 database0
19+
client: *client0
20+
databaseName: &database0Name "retryable-reads-tests"
21+
- collection:
22+
id: &collection0 collection0
23+
database: *database0
24+
collectionName: &collection0Name "exceededtimelimit-test"
25+
26+
initialData:
27+
- collectionName: *collection0Name
28+
databaseName: *database0Name
29+
documents:
30+
- { _id: 1, x: 11 }
31+
- { _id: 2, x: 22 }
32+
- { _id: 3, x: 33 }
33+
34+
tests:
35+
- description: "Find succeeds on second attempt after ExceededTimeLimit"
36+
operations:
37+
- name: failPoint
38+
object: testRunner
39+
arguments:
40+
client: *client0
41+
failPoint:
42+
configureFailPoint: failCommand
43+
mode: { times: 1 }
44+
data:
45+
failCommands: [ "find" ]
46+
errorCode: 262 # ExceededTimeLimit
47+
- name: find
48+
arguments:
49+
filter: { _id: { $gt: 1 } }
50+
object: *collection0
51+
expectResult:
52+
- { _id: 2, x: 22 }
53+
- { _id: 3, x: 33 }
54+
expectEvents:
55+
- client: *client0
56+
events:
57+
- commandStartedEvent:
58+
command:
59+
find: *collection0Name
60+
filter: { _id: { $gt: 1 } }
61+
commandName: find
62+
databaseName: *database0Name
63+
- commandStartedEvent:
64+
command:
65+
find: *collection0Name
66+
filter: { _id: { $gt: 1 } }
67+
commandName: find
68+
databaseName: *database0Name

0 commit comments

Comments
 (0)