Merge pull request #1202 from Lorak-mmk/retry-policy-naming
Update RetryPolicy and SE terminology: "host" -> "target"
wprzytula authored Feb 7, 2025
2 parents e39f449 + 0884f8b commit bceac6a
Showing 9 changed files with 63 additions and 56 deletions.
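The rename is mechanical: the `RetryDecision` variants `RetrySameNode` and `RetryNextNode` become `RetrySameTarget` and `RetryNextTarget`. A minimal sketch of a custom retry policy written against the new names (the trait method names, `reset`, and the import path are assumptions about the driver's public retry API, not part of this diff):

```rust
use scylla::policies::retry::{RequestInfo, RetryDecision, RetryPolicy, RetrySession};

/// Hypothetical policy: retry an idempotent request once, on another target.
#[derive(Debug, Default)]
struct OneShotNextTargetPolicy;

impl RetryPolicy for OneShotNextTargetPolicy {
    fn new_session(&self) -> Box<dyn RetrySession> {
        Box::new(OneShotNextTargetSession { retried: false })
    }
}

struct OneShotNextTargetSession {
    retried: bool,
}

impl RetrySession for OneShotNextTargetSession {
    fn decide_should_retry(&mut self, request_info: RequestInfo) -> RetryDecision {
        if request_info.is_idempotent && !self.retried {
            self.retried = true;
            // Spelled RetryDecision::RetryNextNode(None) before this commit.
            RetryDecision::RetryNextTarget(None)
        } else {
            RetryDecision::DontRetry
        }
    }

    fn reset(&mut self) {
        self.retried = false;
    }
}
```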
4 changes: 2 additions & 2 deletions scylla/src/client/pager.rs
@@ -236,12 +236,12 @@ where
last_error = request_error.into();

match retry_decision {
-RetryDecision::RetrySameNode(cl) => {
+RetryDecision::RetrySameTarget(cl) => {
self.metrics.inc_retries_num();
current_consistency = cl.unwrap_or(current_consistency);
continue 'same_node_retries;
}
-RetryDecision::RetryNextNode(cl) => {
+RetryDecision::RetryNextTarget(cl) => {
self.metrics.inc_retries_num();
current_consistency = cl.unwrap_or(current_consistency);
continue 'nodes_in_plan;
4 changes: 2 additions & 2 deletions scylla/src/client/session.rs
@@ -1916,12 +1916,12 @@ impl Session {
last_error = Some(request_error.into());

match retry_decision {
-RetryDecision::RetrySameNode(new_cl) => {
+RetryDecision::RetrySameTarget(new_cl) => {
self.metrics.inc_retries_num();
current_consistency = new_cl.unwrap_or(current_consistency);
continue 'same_node_retries;
}
-RetryDecision::RetryNextNode(new_cl) => {
+RetryDecision::RetryNextTarget(new_cl) => {
self.metrics.inc_retries_num();
current_consistency = new_cl.unwrap_or(current_consistency);
continue 'nodes_in_plan;
20 changes: 10 additions & 10 deletions scylla/src/observability/history.rs
@@ -626,7 +626,7 @@ mod tests {
history_collector.log_attempt_error(
attempt_id,
&unexpected_response(CqlResponseKind::Ready),
-&RetryDecision::RetrySameNode(Some(Consistency::Quorum)),
+&RetryDecision::RetrySameTarget(Some(Consistency::Quorum)),
);

let second_attempt_id: AttemptId =
@@ -653,7 +653,7 @@
| request send time: 2022-02-22 20:22:22 UTC
| Error at 2022-02-22 20:22:22 UTC
| Error: Received unexpected response from the server: READY. Expected RESULT or ERROR response.
-| Retry decision: RetrySameNode(Some(Quorum))
+| Retry decision: RetrySameTarget(Some(Quorum))
|
| - Attempt #1 sent to 127.0.0.1:19042
| request send time: 2022-02-22 20:22:22 UTC
@@ -737,7 +737,7 @@ mod tests {
history_collector.log_attempt_error(
attempt1,
&unexpected_response(CqlResponseKind::Event),
-&RetryDecision::RetryNextNode(Some(Consistency::Quorum)),
+&RetryDecision::RetryNextTarget(Some(Consistency::Quorum)),
);
let _attempt2: AttemptId =
history_collector.log_attempt_start(request_id, None, node3_addr());
@@ -749,7 +749,7 @@
history_collector.log_attempt_error(
spec2_attempt1,
&no_stream_id_error(),
-&RetryDecision::RetrySameNode(Some(Consistency::Quorum)),
+&RetryDecision::RetrySameTarget(Some(Consistency::Quorum)),
);

let spec2_attempt2: AttemptId =
@@ -761,7 +761,7 @@
history_collector.log_attempt_error(
spec1_attempt1,
&unavailable_error(),
-&RetryDecision::RetryNextNode(Some(Consistency::Quorum)),
+&RetryDecision::RetryNextTarget(Some(Consistency::Quorum)),
);

let _spec4_attempt1: AttemptId =
@@ -780,7 +780,7 @@
| request send time: 2022-02-22 20:22:22 UTC
| Error at 2022-02-22 20:22:22 UTC
| Error: Received unexpected response from the server: EVENT. Expected RESULT or ERROR response.
-| Retry decision: RetryNextNode(Some(Quorum))
+| Retry decision: RetryNextTarget(Some(Quorum))
|
| - Attempt #1 sent to 127.0.0.3:19042
| request send time: 2022-02-22 20:22:22 UTC
@@ -793,7 +793,7 @@
| request send time: 2022-02-22 20:22:22 UTC
| Error at 2022-02-22 20:22:22 UTC
| Error: Database returned an error: Not enough nodes are alive to satisfy required consistency level (consistency: Quorum, required: 2, alive: 1), Error message: Not enough nodes to satisfy consistency
-| Retry decision: RetryNextNode(Some(Quorum))
+| Retry decision: RetryNextTarget(Some(Quorum))
|
|
| > Speculative fiber #1
@@ -802,7 +802,7 @@
| request send time: 2022-02-22 20:22:22 UTC
| Error at 2022-02-22 20:22:22 UTC
| Error: Unable to allocate stream id
-| Retry decision: RetrySameNode(Some(Quorum))
+| Retry decision: RetrySameTarget(Some(Quorum))
|
| - Attempt #1 sent to 127.0.0.1:19042
| request send time: 2022-02-22 20:22:22 UTC
@@ -836,7 +836,7 @@ mod tests {
history_collector.log_attempt_error(
request1_attempt1,
&unexpected_response(CqlResponseKind::Supported),
-&RetryDecision::RetryNextNode(Some(Consistency::Quorum)),
+&RetryDecision::RetryNextTarget(Some(Consistency::Quorum)),
);
let request1_attempt2: AttemptId =
history_collector.log_attempt_start(request1_id, None, node2_addr());
@@ -859,7 +859,7 @@
| request send time: 2022-02-22 20:22:22 UTC
| Error at 2022-02-22 20:22:22 UTC
| Error: Received unexpected response from the server: SUPPORTED. Expected RESULT or ERROR response.
-| Retry decision: RetryNextNode(Some(Quorum))
+| Retry decision: RetryNextTarget(Some(Quorum))
|
| - Attempt #1 sent to 127.0.0.2:19042
| request send time: 2022-02-22 20:22:22 UTC
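The expected strings in these tests are the driver's rendered request history. A sketch of capturing the same output outside the tests (the `Statement` and `HistoryCollector` import paths and the `clone_structured_history` name are assumptions about the history API, not shown in this diff):

```rust
use std::sync::Arc;

use scylla::observability::history::HistoryCollector;
use scylla::statement::Statement;

// Attach a collector to a statement; once the statement has been executed,
// the collector renders in the same format as the snapshots above.
fn attach_history(stmt: &mut Statement) -> Arc<HistoryCollector> {
    let collector = Arc::new(HistoryCollector::new());
    stmt.set_history_listener(collector.clone());
    collector
}

// After execution:
// println!("{}", collector.clone_structured_history());
```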
28 changes: 14 additions & 14 deletions scylla/src/policies/retry/default.rs
@@ -62,7 +62,7 @@ impl RetrySession for DefaultRetrySession {
| RequestAttemptError::DbError(DbError::ServerError, _)
| RequestAttemptError::DbError(DbError::TruncateError, _) => {
if request_info.is_idempotent {
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
} else {
RetryDecision::DontRetry
}
@@ -75,7 +75,7 @@
RequestAttemptError::DbError(DbError::Unavailable { .. }, _) => {
if !self.was_unavailable_retry {
self.was_unavailable_retry = true;
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
} else {
RetryDecision::DontRetry
}
@@ -97,7 +97,7 @@
) => {
if !self.was_read_timeout_retry && received >= required && !*data_present {
self.was_read_timeout_retry = true;
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
} else {
RetryDecision::DontRetry
}
@@ -112,17 +112,17 @@
&& *write_type == WriteType::BatchLog
{
self.was_write_timeout_retry = true;
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
} else {
RetryDecision::DontRetry
}
}
// The node is still bootstrapping, so it can't execute the request; we should try another one
RequestAttemptError::DbError(DbError::IsBootstrapping, _) => {
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
}
// Connection to the contacted node is overloaded, try another one
-RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextNode(None),
+RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextTarget(None),
// In all other cases propagate the error to the user
_ => RetryDecision::DontRetry,
}
@@ -236,7 +236,7 @@ mod tests {
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info(&error, true)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);
}

@@ -266,13 +266,13 @@
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info(&error, false)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);

let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info(&error, true)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);
}

@@ -292,7 +292,7 @@
let mut policy_not_idempotent = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy_not_idempotent.decide_should_retry(make_request_info(&error, false)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);
assert_eq!(
policy_not_idempotent.decide_should_retry(make_request_info(&error, false)),
@@ -302,7 +302,7 @@
let mut policy_idempotent = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy_idempotent.decide_should_retry(make_request_info(&error, true)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);
assert_eq!(
policy_idempotent.decide_should_retry(make_request_info(&error, true)),
@@ -329,7 +329,7 @@
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info(&enough_responses_no_data, false)),
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
);
assert_eq!(
policy.decide_should_retry(make_request_info(&enough_responses_no_data, false)),
@@ -340,7 +340,7 @@
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info(&enough_responses_no_data, true)),
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
);
assert_eq!(
policy.decide_should_retry(make_request_info(&enough_responses_no_data, true)),
@@ -425,7 +425,7 @@ mod tests {
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info(&good_write_type, true)),
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
);
assert_eq!(
policy.decide_should_retry(make_request_info(&good_write_type, true)),
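For completeness, a sketch of installing the policy tested above on an execution profile (the builder method and import paths are assumptions about the client API, not part of this diff):

```rust
use std::sync::Arc;

use scylla::client::execution_profile::ExecutionProfile;
use scylla::policies::retry::DefaultRetryPolicy;

// Requests run under this profile get the retry behaviour tested above.
fn default_retry_profile() -> ExecutionProfile {
    ExecutionProfile::builder()
        .retry_policy(Arc::new(DefaultRetryPolicy::new()))
        .build()
}
```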
44 changes: 22 additions & 22 deletions scylla/src/policies/retry/downgrading_consistency.rs
@@ -54,8 +54,8 @@ impl RetrySession for DowngradingConsistencyRetrySession {
RequestAttemptError::DbError(DbError::Unavailable { .. }, _) => {
// JAVA-764: if the requested consistency level is serial, it means that the operation failed at
// the paxos phase of a LWT.
-// Retry on the next host, on the assumption that the initial coordinator could be network-isolated.
-RetryDecision::RetryNextNode(None)
+// Retry on the next target, on the assumption that the initial coordinator could be network-isolated.
+RetryDecision::RetryNextTarget(None)
}
_ => RetryDecision::DontRetry,
};
@@ -65,18 +65,18 @@

fn max_likely_to_work_cl(known_ok: i32, previous_cl: Consistency) -> RetryDecision {
let decision = if known_ok >= 3 {
-RetryDecision::RetrySameNode(Some(Consistency::Three))
+RetryDecision::RetrySameTarget(Some(Consistency::Three))
} else if known_ok == 2 {
-RetryDecision::RetrySameNode(Some(Consistency::Two))
+RetryDecision::RetrySameTarget(Some(Consistency::Two))
} else if known_ok == 1 || previous_cl == Consistency::EachQuorum {
// JAVA-1005: EACH_QUORUM does not report a global number of alive replicas
// so even if we get 0 alive replicas, there might be
// a node up in some other datacenter
-RetryDecision::RetrySameNode(Some(Consistency::One))
+RetryDecision::RetrySameTarget(Some(Consistency::One))
} else {
RetryDecision::DontRetry
};
-if let RetryDecision::RetrySameNode(new_cl) = decision {
+if let RetryDecision::RetrySameTarget(new_cl) = decision {
debug!(
"Decided to lower required consistency from {} to {:?}.",
previous_cl, new_cl
@@ -93,7 +93,7 @@
| RequestAttemptError::DbError(DbError::ServerError, _)
| RequestAttemptError::DbError(DbError::TruncateError, _) => {
if request_info.is_idempotent {
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
} else {
RetryDecision::DontRetry
}
@@ -125,7 +125,7 @@ impl RetrySession for DowngradingConsistencyRetrySession {
max_likely_to_work_cl(*received, cl)
} else if !*data_present {
self.was_retry = true;
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
} else {
RetryDecision::DontRetry
}
@@ -153,18 +153,18 @@
// retry with whatever consistency should allow all the writes to persist
max_likely_to_work_cl(*received, cl)
}
-WriteType::BatchLog => RetryDecision::RetrySameNode(None),
+WriteType::BatchLog => RetryDecision::RetrySameTarget(None),

_ => RetryDecision::DontRetry,
}
}
}
// The node is still bootstrapping, so it can't execute the request; we should try another one
RequestAttemptError::DbError(DbError::IsBootstrapping, _) => {
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
}
// Connection to the contacted node is overloaded, try another one
-RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextNode(None),
+RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextTarget(None),
// In all other cases propagate the error to the user
_ => RetryDecision::DontRetry,
}
@@ -311,20 +311,20 @@ mod tests {
let mut policy = DowngradingConsistencyRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(&error, true, cl)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);
}

fn max_likely_to_work_cl(known_ok: i32, current_cl: Consistency) -> RetryDecision {
if known_ok >= 3 {
-RetryDecision::RetrySameNode(Some(Consistency::Three))
+RetryDecision::RetrySameTarget(Some(Consistency::Three))
} else if known_ok == 2 {
-RetryDecision::RetrySameNode(Some(Consistency::Two))
+RetryDecision::RetrySameTarget(Some(Consistency::Two))
} else if known_ok == 1 || current_cl == Consistency::EachQuorum {
// JAVA-1005: EACH_QUORUM does not report a global number of alive replicas
// so even if we get 0 alive replicas, there might be
// a node up in some other datacenter
-RetryDecision::RetrySameNode(Some(Consistency::One))
+RetryDecision::RetrySameTarget(Some(Consistency::One))
} else {
RetryDecision::DontRetry
}
@@ -359,13 +359,13 @@ mod tests {
let mut policy = DowngradingConsistencyRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(&error, false, cl)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);

let mut policy = DowngradingConsistencyRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(&error, true, cl)),
-RetryDecision::RetryNextNode(None)
+RetryDecision::RetryNextTarget(None)
);
}
}
@@ -433,7 +433,7 @@ mod tests {
false,
cl
)),
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
);
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(
@@ -452,7 +452,7 @@
true,
cl
)),
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
);
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(
@@ -523,7 +523,7 @@ mod tests {
)),
expected_decision
);
-if let RetryDecision::RetrySameNode(new_cl) = expected_decision {
+if let RetryDecision::RetrySameTarget(new_cl) = expected_decision {
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(
&not_enough_responses_with_data,
@@ -544,7 +544,7 @@
)),
expected_decision
);
-if let RetryDecision::RetrySameNode(new_cl) = expected_decision {
+if let RetryDecision::RetrySameTarget(new_cl) = expected_decision {
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(
&not_enough_responses_with_data,
@@ -593,7 +593,7 @@ mod tests {
true,
cl
)),
-RetryDecision::RetrySameNode(None)
+RetryDecision::RetrySameTarget(None)
);
assert_eq!(
policy.decide_should_retry(make_request_info_with_cl(
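As a worked example of `max_likely_to_work_cl` above: this standalone sketch mirrors the private helper rather than calling it, and the `Consistency` import path is an assumption:

```rust
use scylla::statement::Consistency;

// Mirror of the decision table in max_likely_to_work_cl: pick the strongest
// consistency that the known-alive replicas can still satisfy.
fn downgraded_cl(known_ok: i32, previous_cl: Consistency) -> Option<Consistency> {
    if known_ok >= 3 {
        Some(Consistency::Three)
    } else if known_ok == 2 {
        Some(Consistency::Two)
    } else if known_ok == 1 || previous_cl == Consistency::EachQuorum {
        // EACH_QUORUM reports no global count of alive replicas.
        Some(Consistency::One)
    } else {
        None // corresponds to RetryDecision::DontRetry
    }
}

fn main() {
    // Quorum over RF = 3 with only two replicas known alive:
    // retry the same target, downgraded to Two.
    assert_eq!(downgraded_cl(2, Consistency::Quorum), Some(Consistency::Two));
    // No replicas known alive, and not EACH_QUORUM: don't retry.
    assert_eq!(downgraded_cl(0, Consistency::Quorum), None);
}
```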
(diffs for the remaining 4 changed files did not load)
