Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
179c6b6
POC: Add Tonbo Cloud
jonathanc-n Jul 29, 2025
d6374ed
fmt
jonathanc-n Jul 31, 2025
30a3241
small changes
jonathanc-n Aug 4, 2025
6db9818
update_cloud
jonathanc-n Aug 4, 2025
2a1456b
add
jonathanc-n Aug 5, 2025
3590c92
add Flight
jonathanc-n Aug 7, 2025
87b2dfc
add statistics
jonathanc-n Aug 7, 2025
c144261
Merge branch 'tonbo-io:main' into add-cloud
jonathanc-n Aug 7, 2025
734fe52
add prost
jonathanc-n Aug 8, 2025
3a96c09
flight
jonathanc-n Aug 8, 2025
8e340a1
tonbo cloud + fmt
jonathanc-n Aug 10, 2025
c62e3f9
remove
jonathanc-n Aug 10, 2025
084116f
fmt
jonathanc-n Aug 10, 2025
499e512
consume other record batch types
jonathanc-n Aug 10, 2025
29faf54
fmt
jonathanc-n Aug 10, 2025
82637b6
handle schema
jonathanc-n Aug 10, 2025
6520acc
Merge branch 'main' into add-cloud
jonathanc-n Aug 10, 2025
1799e1d
add
jonathanc-n Aug 10, 2025
d749448
Merge branch 'add-cloud' of https://github.com/jonathanc-n/tonbo into…
jonathanc-n Aug 10, 2025
de78342
fix conflicts
jonathanc-n Aug 10, 2025
74f56b1
fmt
jonathanc-n Aug 10, 2025
305fb23
Merge branch 'tonbo-io:main' into add-cloud
jonathanc-n Aug 10, 2025
524ebc5
fix
jonathanc-n Aug 10, 2025
69d1f72
add schema conversion test
jonathanc-n Aug 11, 2025
98d762c
make Value public for pg_tonbo
jonathanc-n Aug 11, 2025
3695e27
deal with projection
jonathanc-n Aug 15, 2025
381a949
Merge branch 'tonbo-io:main' into add-cloud
jonathanc-n Aug 20, 2025
2c5c2fc
Merge branch 'tonbo-io:main' into add-cloud
jonathanc-n Aug 20, 2025
69f8df9
add tests
jonathanc-n Aug 21, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
workspace = { members = ["parquet-lru", "tonbo_macros"] }
workspace = { members = ["parquet-lru", "tonbo_macros", "cloud"] }

[package]
description = "An embedded persistent KV database in Rust."
Expand Down
34 changes: 34 additions & 0 deletions cloud/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
[package]
build = "build.rs"
name = "tonbo-cloud"
version = "0.1.0"
edition = "2021"

[dependencies]
arrow = { version = "56.0.0", features = ["ipc"] }
arrow-array = "56.0.0"
arrow-flight = { version = "56.0.0", features = ["flight-sql"] }
async-trait = "0.1"
async-stream = "0.3"
bytes = "1"
flume = { version = "0.11", features = ["async"] }
fusio = "0.4.1"
futures = "0.3"
futures-util = "0.3"
futures-core = "0.3"
prost = "0.13.0"
prost-types = "0.13.0"
serde = { version = "1.0", features = ["derive"] }
supabase-wrappers = { version = "=0.1.22", default-features = false, features = ["pg15"] }
thiserror = "2.0.3"
tokio = { version = "1", features = ["io-util", "macros", "rt-multi-thread"], default-features = false }
tokio-stream = { version = "0.1", features = ["sync"] }
tonbo = { path = ".." }
tonic = "0.13"
ulid = { version = "1", features = ["serde"] }

[dev-dependencies]
tempfile = "3"

[build-dependencies]
tonic-build = "0.13"
1 change: 1 addition & 0 deletions cloud/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Tonbo's cloud architecture. This is currently in development.
7 changes: 7 additions & 0 deletions cloud/build.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
// Build script: generates the Tonbo Cloud gRPC bindings from the protobuf
// definitions into `src/gen` so they can be committed/inspected alongside
// the crate sources.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Without rerun-if-changed directives Cargo re-executes this script on
    // every build; restrict reruns to changes under the proto sources.
    println!("cargo:rerun-if-changed=src/proto/cloud.proto");
    println!("cargo:rerun-if-changed=src/proto");
    tonic_build::configure()
        .build_server(true)
        .out_dir("src/gen")
        .compile_protos(&["src/proto/cloud.proto"], &["src/proto"])?;
    Ok(())
}
42 changes: 42 additions & 0 deletions cloud/src/aws/aws_tonbo_svc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
use std::sync::Arc;

use tonic::Response;

use crate::{
aws::{AWSTonbo, AwsTonboRPC},
gen::grpc,
ScanRequest,
};

/// gRPC service facade exposing an [`AWSTonbo`] instance through the
/// generated `AwsTonboRPC` interface. `Clone` is cheap: only the `Arc`
/// handle is copied, so tonic can clone the service per connection.
#[derive(Clone)]
pub struct AWSTonboSvc {
// Shared handle to the underlying Tonbo cloud instance.
inner: Arc<AWSTonbo>,
}

impl AWSTonboSvc {
pub fn new(tonbo: Arc<AWSTonbo>) -> Self {
AWSTonboSvc { inner: tonbo }
}
}

#[tonic::async_trait]
impl AwsTonboRPC for AWSTonboSvc {
    /// Returns Parquet row-count/row-size statistics for the data matching
    /// the scan predicates in `request`.
    ///
    /// Any failure from the underlying metadata lookup is surfaced to the
    /// client as `Status::internal` instead of panicking: the original
    /// `.unwrap()` would abort the server task on the first bad request.
    async fn get_parquet_metadata(
        &self,
        request: tonic::Request<grpc::ScanRequest>,
    ) -> Result<tonic::Response<grpc::ParquetMetadata>, tonic::Status> {
        let tx = self.inner.tonbo.transaction().await;
        let scan_request = ScanRequest::from(request.into_inner());

        let (row_count, row_size) = self
            .inner
            .parquet_metadata(&tx, &scan_request)
            .await
            .map_err(|e| {
                tonic::Status::internal(format!("parquet metadata lookup failed: {e}"))
            })?;
        let meta = grpc::ParquetMetadata {
            row_count,
            row_size,
        };
        Ok(Response::new(meta))
    }
}
303 changes: 303 additions & 0 deletions cloud/src/aws/flight_svc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,303 @@
use std::sync::Arc;

use arrow_array::RecordBatch;
use arrow_flight::{
encode::FlightDataEncoderBuilder, error::FlightError, flight_service_server::FlightService,
Action, ActionType, Criteria, Empty, FlightData, FlightDescriptor, FlightInfo,
HandshakeRequest, HandshakeResponse, PollInfo, PutResult, SchemaResult, Ticket,
};
use futures::{stream::BoxStream, StreamExt};
use prost::Message;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
use tonbo::{
record::{
util::records_to_record_batch, ArrowArrays, ArrowArraysBuilder, DynRecord,
DynRecordImmutableArrays, Record, RecordRef,
},
Entry,
};
use tonic::{Request, Response, Status, Streaming};

use crate::{aws::AWSTonbo, gen::grpc, ScanRequest, TonboCloud};

/// Arrow Flight service backed by an [`AWSTonbo`] instance; `do_get`
/// streams scan results as Flight data. `Clone` only bumps the `Arc`
/// refcount, so tonic can clone the service freely.
#[derive(Clone)]
pub struct TonboFlightSvc {
// Shared handle to the underlying Tonbo cloud instance.
inner: Arc<AWSTonbo>,
}

impl TonboFlightSvc {
pub fn new(tonbo: Arc<AWSTonbo>) -> Self {
TonboFlightSvc { inner: tonbo }
}
}

#[tonic::async_trait]
impl FlightService for TonboFlightSvc {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think Arrow Flight IPC makes things simpler at this stage; we can move forward relying on Arrow Flight.

Copy link
Member

@ethe ethe Aug 25, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Shall we introduce Flight SQL in Tonbo Cloud? Arrow Flight cannot support clause pushdown, which is really important to our core intent.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let me take a look into Flight SQL.

// Associated response-stream types required by the `FlightService` trait.
// All are boxed dynamic streams so the concrete stream type (channel-backed
// for `do_get`, empty for the unimplemented endpoints) stays private.
type HandshakeStream = BoxStream<'static, Result<HandshakeResponse, Status>>;
type ListFlightsStream = BoxStream<'static, Result<FlightInfo, Status>>;
type DoGetStream = BoxStream<'static, Result<FlightData, Status>>;
type DoPutStream = BoxStream<'static, Result<PutResult, Status>>;
type DoActionStream = BoxStream<'static, Result<arrow_flight::Result, Status>>;
type ListActionsStream = BoxStream<'static, Result<ActionType, Status>>;
type DoExchangeStream = BoxStream<'static, Result<FlightData, Status>>;

// Schema retrieval is not wired up yet; clients obtain the schema from the
// first message of the `do_get` stream instead.
async fn get_schema(
&self,
_request: Request<FlightDescriptor>,
) -> Result<Response<SchemaResult>, Status> {
Err(Status::unimplemented("Not yet implemented"))
}

// Scans Tonbo for record batches matching the scan predicates.
//
// The Ticket payload must be a protobuf-encoded `grpc::ScanRequest`. The scan
// itself runs in a spawned task that forwards `RecordBatch`es over a bounded
// channel; the schema of the first batch is delivered separately (oneshot) so
// the Flight encoder can be configured before data flows.
//
// Fix over the previous revision: the two `rb_tx.send(..).await.unwrap()`
// sites would panic the spawned scan task whenever the client disconnected
// (receiver dropped). All send sites now treat a closed channel as "client
// went away" and stop streaming, matching the other send sites in this method.
async fn do_get(
    &self,
    request: Request<Ticket>,
) -> Result<Response<Self::DoGetStream>, Status> {
    // Unparse ticket to `ScanRequest`
    let ticket = request.into_inner().ticket;
    let scan_pb = grpc::ScanRequest::decode(ticket.as_ref()).map_err(|e| {
        Status::invalid_argument(format!("Expected Ticket to be grpc::ScanRequest: {}", e))
    })?;
    let scan = ScanRequest::from(scan_pb);

    // Create channel for RecordBatches + schema
    let (rb_tx, rb_rx) = mpsc::channel::<Result<RecordBatch, FlightError>>(32);
    let (schema_tx, schema_rx) = oneshot::channel();

    let inner = Arc::clone(&self.inner);

    tokio::spawn(async move {
        let txn = inner.tonbo.transaction().await;

        let mut entries = match inner.read(&txn, &scan).await {
            Ok(s) => s,
            Err(e) => {
                let _ = rb_tx
                    .send(Err(FlightError::ExternalError(e.to_string().into())))
                    .await;
                return;
            }
        };

        let mut schema_builder: Vec<(u32, DynRecord)> = vec![];
        // Retrieve first batch and send schema
        let first_batch = loop {
            match entries.next().await {
                Some(Ok(Entry::RecordBatch(record_batch))) => {
                    // break after finding first batch
                    break record_batch.record_batch().clone();
                }
                Some(Ok(Entry::Mutable(entry))) => {
                    // Send record batch to channel
                    if let Some(record) = entry.value() {
                        // use dummy ts as it doesn't matter when converted to `RecordBatch`
                        schema_builder.push((0, (*record).clone()));
                        break records_to_record_batch(
                            &schema_builder[0].1.schema(0),
                            schema_builder,
                        );
                    }
                }
                Some(Ok(Entry::Transaction((_, record)))) => {
                    if let Some(record) = record {
                        // use dummy ts as it doesn't matter when converted to `RecordBatch`
                        schema_builder.push((0, (*record).clone()));
                        break records_to_record_batch(
                            &schema_builder[0].1.schema(0),
                            schema_builder,
                        );
                    }
                }
                // TODO: deal with projection
                Some(Ok(Entry::Projection((record, projection)))) => {
                    match *record {
                        // TODO: Make more efficient by batching build batch tranformation
                        Entry::RecordBatch(entry) => {
                            let schema = entry.batch_as_ref().schema();
                            let value = entry.get();
                            if let Some(mut value) = value {
                                let mut dyn_record_builder =
                                    DynRecordImmutableArrays::builder(schema, 1);

                                // Apply projection
                                value.projection(&projection);
                                dyn_record_builder.push(entry.internal_key(), Some(value));
                                let dyn_record_array = dyn_record_builder.finish(None);
                                let record_batch = dyn_record_array.as_record_batch();

                                break record_batch.clone();
                            }
                        }
                        _ => {
                            let dyn_record = record.owned_value();

                            if let Some(mut dyn_record) = dyn_record {
                                dyn_record.projection(&projection);
                                schema_builder.push((0, dyn_record.clone()));
                                break records_to_record_batch(
                                    &schema_builder[0].1.schema(0),
                                    schema_builder,
                                );
                            }
                        }
                    }
                }
                Some(Err(e)) => {
                    let _ = rb_tx
                        .send(Err(FlightError::ExternalError(e.to_string().into())))
                        .await;
                    return;
                }
                None => {
                    return;
                }
            }
        };

        // Receiver may already be gone if `do_get` was cancelled; nothing to do.
        let _ = schema_tx.send(first_batch.schema());

        if rb_tx.send(Ok(first_batch)).await.is_err() {
            return;
        }

        let mut batch_builder: Vec<(u32, DynRecord)> = vec![];

        while let Some(item) = entries.next().await {
            match item {
                Ok(Entry::RecordBatch(record_batch)) => {
                    // Send record batch to channel
                    if rb_tx
                        .send(Ok(record_batch.record_batch().clone()))
                        .await
                        .is_err()
                    {
                        return;
                    }
                }
                Ok(Entry::Mutable(entry)) => {
                    // Send record batch to channel
                    if let Some(record) = entry.value() {
                        // use dummy ts as it doesn't matter when converted to `RecordBatch`
                        batch_builder.push((0, (*record).clone()));
                    }
                }
                Ok(Entry::Transaction((_, record))) => {
                    if let Some(record) = record {
                        // use dummy ts as it doesn't matter when converted to `RecordBatch`
                        batch_builder.push((0, (*record).clone()));
                    }
                }
                Ok(Entry::Projection((record, projection))) => {
                    match *record {
                        // TODO: Make more efficient by batching build batch tranformation
                        Entry::RecordBatch(entry) => {
                            let schema = entry.batch_as_ref().schema();
                            let value = entry.get();
                            if let Some(mut value) = value {
                                let mut dyn_record_builder =
                                    DynRecordImmutableArrays::builder(schema, 1);

                                // Apply projection
                                value.projection(&projection);
                                dyn_record_builder.push(entry.internal_key(), Some(value));
                                let dyn_record_array = dyn_record_builder.finish(None);
                                let record_batch = dyn_record_array.as_record_batch();

                                // Closed channel == client disconnected; stop the
                                // scan instead of panicking (was `unwrap()`).
                                if rb_tx.send(Ok(record_batch.clone())).await.is_err() {
                                    return;
                                }
                            }
                        }
                        _ => {
                            let dyn_record = record.owned_value();

                            if let Some(mut dyn_record) = dyn_record {
                                dyn_record.projection(&projection);
                                batch_builder.push((0, dyn_record));
                            }
                        }
                    }
                }
                Err(e) => {
                    let _ = rb_tx
                        .send(Err(FlightError::ExternalError(e.to_string().into())))
                        .await;
                    return;
                }
            }
        }
        // Flush any buffered mutable/transaction rows as one final batch.
        if !batch_builder.is_empty() {
            let build_batch =
                records_to_record_batch(&batch_builder[0].1.schema(0), batch_builder);
            // A dropped receiver just means the client went away (was `unwrap()`).
            let _ = rb_tx.send(Ok(build_batch)).await;
        }
    });

    let schema = schema_rx
        .await
        .map_err(|_| Status::internal("failed to get schema"))?;
    let rb_stream = ReceiverStream::new(rb_rx);

    let fd_stream = FlightDataEncoderBuilder::new()
        .with_schema(schema)
        .build(rb_stream);

    let out = fd_stream.map(|res| res.map_err(|e| Status::internal(e.to_string())));
    Ok(Response::new(Box::pin(out)))
}

// The remaining `FlightService` endpoints are stubbed out: each one builds
// an `unimplemented` status and returns it. Only `do_get` is wired up to
// Tonbo at this stage of development.

async fn handshake(
    &self,
    _request: Request<Streaming<HandshakeRequest>>,
) -> Result<Response<Self::HandshakeStream>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn list_flights(
    &self,
    _request: Request<Criteria>,
) -> Result<Response<Self::ListFlightsStream>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn get_flight_info(
    &self,
    _request: Request<FlightDescriptor>,
) -> Result<Response<FlightInfo>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn do_put(
    &self,
    _request: Request<Streaming<FlightData>>,
) -> Result<Response<Self::DoPutStream>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn do_action(
    &self,
    _request: Request<Action>,
) -> Result<Response<Self::DoActionStream>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn list_actions(
    &self,
    _request: Request<Empty>,
) -> Result<Response<Self::ListActionsStream>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn do_exchange(
    &self,
    _request: Request<Streaming<FlightData>>,
) -> Result<Response<Self::DoExchangeStream>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}

async fn poll_flight_info(
    &self,
    _request: Request<FlightDescriptor>,
) -> Result<Response<PollInfo>, Status> {
    let status = Status::unimplemented("Not yet implemented");
    Err(status)
}
}
Loading
Loading