Skip to content

Commit 9e96ad6

Browse files
committed
perf(cubestore): Reduce memory usage (allocations) with pinned slices
1 parent 6275735 commit 9e96ad6

File tree

1 file changed: +9 additions, −9 deletions

rust/cubestore/cubestore/src/metastore/rocks_table.rs

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -518,14 +518,14 @@ pub trait RocksTable: BaseRocksTable + Debug + Send + Sync {
518518
let inserted_row = self.insert_row_kv(row_id, serialized_row)?;
519519

520520
batch_pipe.add_event(MetaStoreEvent::Insert(Self::table_id(), row_id));
521-
if self.snapshot().get(&inserted_row.key)?.is_some() {
521+
if self.snapshot().get_pinned(&inserted_row.key)?.is_some() {
522522
return Err(CubeError::internal(format!("Primary key constraint violation. Primary key already exists for a row id {}: {:?}", row_id, &row)));
523523
}
524524
batch_pipe.batch().put(inserted_row.key, inserted_row.val);
525525

526526
let index_row = self.insert_index_row(&row, row_id)?;
527527
for to_insert in index_row {
528-
if self.snapshot().get(&to_insert.key)?.is_some() {
528+
if self.snapshot().get_pinned(&to_insert.key)?.is_some() {
529529
return Err(CubeError::internal(format!("Primary key constraint violation in secondary index. Primary key already exists for a row id {}: {:?}", row_id, &row)));
530530
}
531531
batch_pipe.batch().put(to_insert.key, to_insert.val);
@@ -573,15 +573,15 @@ pub trait RocksTable: BaseRocksTable + Debug + Send + Sync {
573573
fn migration_check_table(&self) -> Result<(), CubeError> {
574574
let snapshot = self.snapshot();
575575

576-
let table_info = snapshot.get(
576+
let table_info = snapshot.get_pinned(
577577
&RowKey::TableInfo {
578578
table_id: Self::table_id(),
579579
}
580580
.to_bytes(),
581581
)?;
582582

583583
if let Some(table_info) = table_info {
584-
let table_info = self.deserialize_table_info(table_info.as_slice())?;
584+
let table_info = self.deserialize_table_info(&table_info)?;
585585

586586
if table_info.version != Self::T::version()
587587
|| table_info.value_version != Self::T::value_version()
@@ -633,14 +633,14 @@ pub trait RocksTable: BaseRocksTable + Debug + Send + Sync {
633633
fn migration_check_indexes(&self) -> Result<(), CubeError> {
634634
let snapshot = self.snapshot();
635635
for index in Self::indexes().into_iter() {
636-
let index_info = snapshot.get(
636+
let index_info = snapshot.get_pinned(
637637
&RowKey::SecondaryIndexInfo {
638638
index_id: Self::index_id(index.get_id()),
639639
}
640640
.to_bytes(),
641641
)?;
642642
if let Some(index_info) = index_info {
643-
let index_info = self.deserialize_index_info(index_info.as_slice())?;
643+
let index_info = self.deserialize_index_info(&index_info)?;
644644
if index_info.version != index.version()
645645
|| index_info.value_version != index.value_version()
646646
{
@@ -977,7 +977,7 @@ pub trait RocksTable: BaseRocksTable + Debug + Send + Sync {
977977
RowKey::SecondaryIndex(Self::index_id(index_id), secondary_key_hash, row_id);
978978
let secondary_index_key = secondary_index_row_key.to_bytes();
979979

980-
if let Some(secondary_key_bytes) = self.db().get(&secondary_index_key)? {
980+
if let Some(secondary_key_bytes) = self.db().get_pinned(&secondary_index_key)? {
981981
let index_value_version = RocksSecondaryIndex::value_version(secondary_index);
982982
let new_value = match RocksSecondaryIndexValue::from_bytes(
983983
&secondary_key_bytes,
@@ -1102,10 +1102,10 @@ pub trait RocksTable: BaseRocksTable + Debug + Send + Sync {
11021102

11031103
fn get_row(&self, row_id: u64) -> Result<Option<IdRow<Self::T>>, CubeError> {
11041104
let ref db = self.snapshot();
1105-
let res = db.get(RowKey::Table(Self::table_id(), row_id).to_bytes())?;
1105+
let res = db.get_pinned(RowKey::Table(Self::table_id(), row_id).to_bytes())?;
11061106

11071107
if let Some(buffer) = res {
1108-
let row = self.deserialize_id_row(row_id, buffer.as_slice())?;
1108+
let row = self.deserialize_id_row(row_id, &buffer)?;
11091109
return Ok(Some(row));
11101110
}
11111111

0 commit comments

Comments (0)