From 67c7fa1d7d34b521e8b54f2084836b88c41d85b6 Mon Sep 17 00:00:00 2001 From: cfzjywxk Date: Mon, 15 Jan 2024 11:34:18 +0800 Subject: [PATCH] cop: fix the scan panic when checksum is enabled (#16373) close tikv/tikv#16371 Fix the scan panic issue when checksum is enabled. Signed-off-by: cfzjywxk Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- components/test_coprocessor/src/store.rs | 20 +++++++++++++++++++ .../src/codec/row/v2/row_slice.rs | 12 +++++++++-- tests/integrations/coprocessor/test_select.rs | 9 +++++++-- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/components/test_coprocessor/src/store.rs b/components/test_coprocessor/src/store.rs index 6763ea7bb1a..221ed5afe46 100644 --- a/components/test_coprocessor/src/store.rs +++ b/components/test_coprocessor/src/store.rs @@ -299,6 +299,26 @@ impl Store { .collect(); FixtureStore::new(data) } + + pub fn insert_all_null_row( + &mut self, + tbl: &Table, + ctx: Context, + with_checksum: bool, + extra_checksum: Option<u32>, + ) { + self.begin(); + let inserts = self + .insert_into(tbl) + .set(&tbl["id"], Datum::Null) + .set(&tbl["name"], Datum::Null) + .set(&tbl["count"], Datum::Null) + .set_v2(&tbl["id"], ScalarValue::Int(None)) + .set_v2(&tbl["name"], ScalarValue::Bytes(None)) + .set_v2(&tbl["count"], ScalarValue::Int(None)); + inserts.execute_with_v2_checksum(ctx.clone(), with_checksum, extra_checksum); + self.commit(); + } } /// A trait for a general implementation to convert to a Txn store. diff --git a/components/tidb_query_datatype/src/codec/row/v2/row_slice.rs b/components/tidb_query_datatype/src/codec/row/v2/row_slice.rs index aa5eb3fc56f..e86ebe28802 100644 --- a/components/tidb_query_datatype/src/codec/row/v2/row_slice.rs +++ b/components/tidb_query_datatype/src/codec/row/v2/row_slice.rs @@ -233,7 +233,11 @@ impl RowSlice<'_> { RowSlice::Big { offsets, values, ..
} => { - let last_slice_idx = offsets.get(non_null_col_num - 1).unwrap() as usize; + let last_slice_idx = if non_null_col_num == 0 { + 0 + } else { + offsets.get(non_null_col_num - 1).unwrap() as usize + }; let slice = values.slice; *values = LeBytes::new(&slice[..last_slice_idx]); &slice[last_slice_idx..] @@ -241,7 +245,11 @@ impl RowSlice<'_> { RowSlice::Small { offsets, values, .. } => { - let last_slice_idx = offsets.get(non_null_col_num - 1).unwrap() as usize; + let last_slice_idx = if non_null_col_num == 0 { + 0 + } else { + offsets.get(non_null_col_num - 1).unwrap() as usize + }; let slice = values.slice; *values = LeBytes::new(&slice[..last_slice_idx]); &slice[last_slice_idx..] diff --git a/tests/integrations/coprocessor/test_select.rs b/tests/integrations/coprocessor/test_select.rs index 1a062924dae..4e5418cdc14 100644 --- a/tests/integrations/coprocessor/test_select.rs +++ b/tests/integrations/coprocessor/test_select.rs @@ -2087,11 +2087,16 @@ fn test_select_v2_format_with_checksum() { for extra_checksum in [None, Some(132423)] { // The row value encoded with checksum bytes should have no impact on cop task // processing and related result chunk filling. - let (_, endpoint) = + let (mut store, endpoint) = init_data_with_commit_v2_checksum(&product, &data, true, extra_checksum); + store.insert_all_null_row(&product, Context::default(), true, extra_checksum); let req = DagSelect::from(&product).build(); let mut resp = handle_select(&endpoint, req); - let spliter = DagChunkSpliter::new(resp.take_chunks().into(), 3); + let mut spliter = DagChunkSpliter::new(resp.take_chunks().into(), 3); + let first_row = spliter.next().unwrap(); + assert_eq!(first_row[0], Datum::I64(0)); + assert_eq!(first_row[1], Datum::Null); + assert_eq!(first_row[2], Datum::Null); for (row, (id, name, cnt)) in spliter.zip(data.clone()) { let name_datum = name.map(|s| s.as_bytes()).into(); let expected_encoded = datum::encode_value(