Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,12 @@ jobs:
RUST_LOG: DEBUG
RUST_BACKTRACE: full

- name: DataFusion Integration Test
run: cargo test -p paimon-datafusion --test read_tables
env:
RUST_LOG: DEBUG
RUST_BACKTRACE: full

- name: Go Integration Test
working-directory: bindings/go
run: make test
Expand Down
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

[workspace]
resolver = "2"
members = ["crates/paimon", "crates/integration_tests", "bindings/c"]
members = ["crates/paimon", "crates/integration_tests", "bindings/c", "crates/integrations/datafusion"]

[workspace.package]
version = "0.0.0"
Expand Down
34 changes: 34 additions & 0 deletions crates/integrations/datafusion/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

[package]
name = "paimon-datafusion"
edition.workspace = true
version.workspace = true
license.workspace = true
description = "Apache Paimon DataFusion Integration (read-only)"
categories = ["database"]
keywords = ["paimon", "datafusion", "integrations"]

[dependencies]
async-trait = "0.1"
datafusion = { version = "52.3.0"}
paimon = { path = "../../paimon" }
futures = "0.3"

[dev-dependencies]
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
23 changes: 23 additions & 0 deletions crates/integrations/datafusion/src/error.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use datafusion::common::error::GenericError;

/// Converts a Paimon error into a DataFusion error.
pub fn to_datafusion_error(error: paimon::Error) -> datafusion::error::DataFusionError {
datafusion::error::DataFusionError::External(GenericError::from(error))
}
46 changes: 46 additions & 0 deletions crates/integrations/datafusion/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Apache Paimon DataFusion Integration (read-only).
//!
//! Register a Paimon table as a DataFusion table provider to query it with SQL or DataFrame API.
//!
//! # Example
//!
//! ```ignore
//! use std::sync::Arc;
//! use datafusion::prelude::SessionContext;
//! use paimon_datafusion::PaimonTableProvider;
//!
//! // Obtain a Paimon Table (e.g. from your catalog), then:
//! let provider = PaimonTableProvider::try_new(table)?;
//! let ctx = SessionContext::new();
//! ctx.register_table("my_table", Arc::new(provider))?;
//! let df = ctx.sql("SELECT * FROM my_table").await?;
//! ```
//!
//! This version does not support write, column projection, or predicate pushdown.

// Internal modules; the public surface is re-exported below.
mod error;
mod physical_plan;
mod schema;
mod table;

// Public API: error conversion, the physical scan node, the schema mapping
// helper, and the DataFusion `TableProvider` implementation.
pub use error::to_datafusion_error;
pub use physical_plan::PaimonTableScan;
pub use schema::paimon_schema_to_arrow;
pub use table::PaimonTableProvider;
20 changes: 20 additions & 0 deletions crates/integrations/datafusion/src/physical_plan/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// The scan node implementation; only the node type itself is public.
pub(crate) mod scan;

pub use scan::PaimonTableScan;
125 changes: 125 additions & 0 deletions crates/integrations/datafusion/src/physical_plan/scan.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use std::any::Any;
use std::sync::Arc;

use datafusion::arrow::datatypes::SchemaRef as ArrowSchemaRef;
use datafusion::error::Result as DFResult;
use datafusion::execution::{SendableRecordBatchStream, TaskContext};
use datafusion::physical_expr::EquivalenceProperties;
use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType};
use datafusion::physical_plan::stream::RecordBatchStreamAdapter;
use datafusion::physical_plan::{DisplayAs, ExecutionPlan, Partitioning, PlanProperties};
use futures::{StreamExt, TryStreamExt};
use paimon::table::Table;

use crate::error::to_datafusion_error;

/// Execution plan that scans a Paimon table (read-only, no projection, no predicate, no limit).
/// Execution plan that scans a Paimon table (read-only, no projection, no predicate, no limit).
#[derive(Debug)]
pub struct PaimonTableScan {
    // The Paimon table to read; cloned into the stream future on `execute`.
    table: Table,
    // Precomputed DataFusion plan properties (schema, partitioning,
    // emission type, boundedness), built once in `new`.
    plan_properties: PlanProperties,
}

impl PaimonTableScan {
pub(crate) fn new(schema: ArrowSchemaRef, table: Table) -> Self {
let plan_properties = PlanProperties::new(
EquivalenceProperties::new(schema.clone()),
// TODO: Currently all Paimon splits are read in a single DataFusion partition,
// which means we lose DataFusion parallelism. A follow-up should expose one
// execution partition per Paimon split so that DataFusion can schedule them
// across threads.
Partitioning::UnknownPartitioning(1),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is the single-partition execution intentional for the initial version?

PaimonTableScan hardcodes UnknownPartitioning(1), and execute() plans/reads all Paimon splits inside one execution partition. Since the underlying Paimon scan already produces bin-packed splits, this means we lose DataFusion parallelism completely.

Not necessarily a blocker for the first PR, but I think this limitation should be called out explicitly (e.g. a // TODO comment), or followed up by exposing one execution partition per Paimon split.

EmissionType::Incremental,
Boundedness::Bounded,
);
Self {
table,
plan_properties,
}
}

pub fn table(&self) -> &Table {
&self.table
}
}

impl ExecutionPlan for PaimonTableScan {
    fn name(&self) -> &str {
        "PaimonTableScan"
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn properties(&self) -> &PlanProperties {
        &self.plan_properties
    }

    // Leaf node: the scan reads the table directly and has no child plans.
    fn children(&self) -> Vec<&Arc<dyn ExecutionPlan + 'static>> {
        vec![]
    }

    fn with_new_children(
        self: Arc<Self>,
        _children: Vec<Arc<dyn ExecutionPlan>>,
    ) -> DFResult<Arc<dyn ExecutionPlan>> {
        // No children to substitute, so the node is returned unchanged.
        Ok(self)
    }

    /// Streams all Paimon splits as Arrow record batches.
    ///
    /// `execute` must be synchronous, but planning the Paimon scan is async.
    /// The async work (split planning + reader construction) is captured in a
    /// future that resolves to a stream, and `once(fut).try_flatten()` turns
    /// that future-of-stream into a plain stream; planning errors surface as
    /// the first item of the output stream.
    fn execute(
        &self,
        _partition: usize,
        _context: Arc<TaskContext>,
    ) -> DFResult<SendableRecordBatchStream> {
        // Clone so the future is 'static and independent of &self.
        let table = self.table.clone();
        let schema = self.schema();

        let fut = async move {
            let read_builder = table.new_read_builder();
            let scan = read_builder.new_scan();
            // Enumerate splits, then build an Arrow reader over all of them.
            let plan = scan.plan().await.map_err(to_datafusion_error)?;
            let read = read_builder.new_read().map_err(to_datafusion_error)?;
            let stream = read.to_arrow(plan.splits()).map_err(to_datafusion_error)?;
            // Convert per-batch Paimon errors into DataFusion errors.
            let stream = stream.map(|r| r.map_err(to_datafusion_error));

            Ok::<_, datafusion::error::DataFusionError>(RecordBatchStreamAdapter::new(
                schema,
                Box::pin(stream),
            ))
        };

        let stream = futures::stream::once(fut).try_flatten();
        Ok(Box::pin(RecordBatchStreamAdapter::new(
            self.schema(),
            stream,
        )))
    }
}

impl DisplayAs for PaimonTableScan {
    /// Renders the node name; all display formats produce the same output.
    fn fmt_as(
        &self,
        _t: datafusion::physical_plan::DisplayFormatType,
        f: &mut std::fmt::Formatter,
    ) -> std::fmt::Result {
        f.write_str("PaimonTableScan")
    }
}
116 changes: 116 additions & 0 deletions crates/integrations/datafusion/src/schema.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::common::DataFusionError;
use datafusion::common::Result as DFResult;
use std::sync::Arc;

use paimon::spec::{DataField, DataType as PaimonDataType};

/// Converts Paimon table schema (logical row type fields) to a DataFusion Arrow schema.
///
/// Fails if any field uses a Paimon type that has no supported Arrow mapping.
pub fn paimon_schema_to_arrow(fields: &[DataField]) -> DFResult<Arc<Schema>> {
    let mut arrow_fields = Vec::with_capacity(fields.len());
    for field in fields {
        let data_type = paimon_data_type_to_arrow(field.data_type())?;
        let nullable = field.data_type().is_nullable();
        arrow_fields.push(Field::new(field.name(), data_type, nullable));
    }
    Ok(Arc::new(Schema::new(arrow_fields)))
}

/// Maps a single Paimon logical type to its Arrow [`DataType`].
///
/// The TIME and DECIMAL mappings deliberately mirror what the Parquet Arrow
/// reader produces at runtime (see inline comments), so the provider schema
/// and the `RecordBatch` schema emitted by `execute` stay identical.
///
/// Returns `NotImplemented` for nested types and `Internal` for out-of-range
/// precisions.
fn paimon_data_type_to_arrow(dt: &PaimonDataType) -> DFResult<DataType> {
    use datafusion::arrow::datatypes::TimeUnit;

    Ok(match dt {
        PaimonDataType::Boolean(_) => DataType::Boolean,
        PaimonDataType::TinyInt(_) => DataType::Int8,
        PaimonDataType::SmallInt(_) => DataType::Int16,
        PaimonDataType::Int(_) => DataType::Int32,
        PaimonDataType::BigInt(_) => DataType::Int64,
        PaimonDataType::Float(_) => DataType::Float32,
        PaimonDataType::Double(_) => DataType::Float64,
        // CHAR is widened to plain Utf8; Arrow has no fixed-length string type here.
        PaimonDataType::VarChar(_) | PaimonDataType::Char(_) => DataType::Utf8,
        PaimonDataType::Binary(_) | PaimonDataType::VarBinary(_) => DataType::Binary,
        PaimonDataType::Date(_) => DataType::Date32,
        PaimonDataType::Time(t) => match t.precision() {
            // `read.to_arrow(...)` goes through the Parquet Arrow reader, which exposes INT32
            // TIME values as millisecond precision only. Mirror that here so provider schema and
            // runtime RecordBatch schema stay identical.
            0..=3 => DataType::Time32(TimeUnit::Millisecond),
            4..=6 => DataType::Time64(TimeUnit::Microsecond),
            7..=9 => DataType::Time64(TimeUnit::Nanosecond),
            precision => {
                return Err(DataFusionError::Internal(format!(
                    "Unsupported TIME precision {precision}"
                )));
            }
        },
        // Naive timestamp: no timezone attached.
        PaimonDataType::Timestamp(t) => {
            DataType::Timestamp(timestamp_time_unit(t.precision())?, None)
        }
        // Local-zoned timestamps are normalized to UTC in the Arrow schema.
        PaimonDataType::LocalZonedTimestamp(t) => {
            DataType::Timestamp(timestamp_time_unit(t.precision())?, Some("UTC".into()))
        }
        PaimonDataType::Decimal(d) => {
            let p = u8::try_from(d.precision()).map_err(|_| {
                DataFusionError::Internal("Decimal precision exceeds u8".to_string())
            })?;
            let s = i8::try_from(d.scale() as i32).map_err(|_| {
                DataFusionError::Internal("Decimal scale out of i8 range".to_string())
            })?;
            match d.precision() {
                // The Parquet Arrow reader normalizes DECIMAL columns to Decimal128 regardless of
                // Parquet physical storage width. Mirror that here to avoid DataFusion schema
                // mismatch between `TableProvider::schema()` and `execute()` output.
                1..=38 => DataType::Decimal128(p, s),
                precision => {
                    return Err(DataFusionError::Internal(format!(
                        "Unsupported DECIMAL precision {precision}"
                    )));
                }
            }
        }
        PaimonDataType::Array(_)
        | PaimonDataType::Map(_)
        | PaimonDataType::Multiset(_)
        | PaimonDataType::Row(_) => {
            return Err(DataFusionError::NotImplemented(
                "Paimon DataFusion integration does not yet support nested types (Array/Map/Row)"
                    .to_string(),
            ));
        }
    })
}

/// Maps a Paimon TIMESTAMP precision (fractional-second digits, 0-9) to the
/// corresponding Arrow time unit; out-of-range precisions are an error.
fn timestamp_time_unit(precision: u32) -> DFResult<datafusion::arrow::datatypes::TimeUnit> {
    use datafusion::arrow::datatypes::TimeUnit;

    if precision <= 3 {
        Ok(TimeUnit::Millisecond)
    } else if precision <= 6 {
        Ok(TimeUnit::Microsecond)
    } else if precision <= 9 {
        Ok(TimeUnit::Nanosecond)
    } else {
        Err(DataFusionError::Internal(format!(
            "Unsupported TIMESTAMP precision {precision}"
        )))
    }
}
Loading
Loading