Skip to content

Update to object_store 0.4 #3089

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Aug 15, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion datafusion/common/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ pyarrow = ["pyo3"]
apache-avro = { version = "0.14", features = ["snappy"], optional = true }
arrow = { version = "19.0.0", features = ["prettyprint"] }
cranelift-module = { version = "0.86.1", optional = true }
object_store = { version = "0.3", optional = true }
object_store = { version = "0.4", optional = true }
ordered-float = "3.0"
parquet = { version = "19.0.0", features = ["arrow"], optional = true }
pyo3 = { version = "0.16", optional = true }
Expand Down
2 changes: 1 addition & 1 deletion datafusion/core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ lazy_static = { version = "^1.4.0" }
log = "^0.4"
num-traits = { version = "0.2", optional = true }
num_cpus = "1.13.0"
object_store = "0.3.0"
object_store = "0.4.0"
ordered-float = "3.0"
parking_lot = "0.12"
parquet = { version = "19.0.0", features = ["arrow", "async"] }
Expand Down
19 changes: 18 additions & 1 deletion datafusion/core/src/datasource/file_format/parquet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -575,7 +575,8 @@ mod tests {
use futures::StreamExt;
use object_store::local::LocalFileSystem;
use object_store::path::Path;
use object_store::{GetResult, ListResult};
use object_store::{GetResult, ListResult, MultipartId};
use tokio::io::AsyncWrite;

#[tokio::test]
async fn read_merged_batches() -> Result<()> {
Expand Down Expand Up @@ -649,6 +650,22 @@ mod tests {
Err(object_store::Error::NotImplemented)
}

/// Multipart uploads are not supported by this test double; every call
/// fails with [`object_store::Error::NotImplemented`].
async fn put_multipart(
    &self,
    _location: &Path,
) -> object_store::Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
    // Required by the object_store 0.4 `ObjectStore` trait, but the tests
    // in this module never start a multipart upload.
    Err(object_store::Error::NotImplemented)
}

/// Aborting a multipart upload is not supported by this test double;
/// every call fails with [`object_store::Error::NotImplemented`].
async fn abort_multipart(
    &self,
    _location: &Path,
    _multipart_id: &MultipartId,
) -> object_store::Result<()> {
    // Nothing to clean up: `put_multipart` never succeeds here.
    Err(object_store::Error::NotImplemented)
}

/// Whole-object reads always fail with
/// [`object_store::Error::NotImplemented`] in this test double.
async fn get(&self, _location: &Path) -> object_store::Result<GetResult> {
    Err(object_store::Error::NotImplemented)
}
Expand Down
18 changes: 17 additions & 1 deletion datafusion/core/src/physical_plan/file_format/chunked_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,12 @@ use bytes::Bytes;
use futures::stream::BoxStream;
use futures::StreamExt;
use object_store::path::Path;
use object_store::Result;
use object_store::{GetResult, ListResult, ObjectMeta, ObjectStore};
use object_store::{MultipartId, Result};
use std::fmt::{Debug, Display, Formatter};
use std::ops::Range;
use std::sync::Arc;
use tokio::io::AsyncWrite;

/// Wraps a [`ObjectStore`] and makes its get response return chunks
///
Expand Down Expand Up @@ -53,6 +54,21 @@ impl ObjectStore for ChunkedStore {
self.inner.put(location, bytes).await
}

/// Starts a multipart upload by forwarding to the wrapped store.
///
/// `ChunkedStore` only alters how reads are returned, so write paths
/// pass through to `inner` unchanged.
async fn put_multipart(
    &self,
    location: &Path,
) -> Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
    self.inner.put_multipart(location).await
}

/// Aborts an in-progress multipart upload by forwarding to the wrapped
/// store; `ChunkedStore` adds no write-side behavior of its own.
async fn abort_multipart(
    &self,
    location: &Path,
    multipart_id: &MultipartId,
) -> Result<()> {
    self.inner.abort_multipart(location, multipart_id).await
}

async fn get(&self, location: &Path) -> Result<GetResult> {
let bytes = self.inner.get(location).await?.bytes().await?;
let mut offset = 0;
Expand Down
24 changes: 24 additions & 0 deletions datafusion/core/src/physical_plan/file_format/parquet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -369,6 +369,30 @@ impl AsyncFileReader for ParquetFileReader {
.boxed()
}

/// Fetches several byte ranges from the underlying object store in a
/// single request, recording the total bytes requested in the scan
/// metrics before issuing the read.
///
/// Store errors are converted into [`ParquetError::General`] so they fit
/// the `AsyncFileReader` contract.
fn get_byte_ranges(
    &mut self,
    ranges: Vec<Range<usize>>,
) -> BoxFuture<'_, parquet::errors::Result<Vec<Bytes>>>
where
    Self: Send,
{
    // Account for every requested range up front, even if the read
    // later fails.
    let bytes_requested = ranges.iter().map(|r| r.end - r.start).sum();
    self.metrics.bytes_scanned.add(bytes_requested);

    async move {
        match self.store.get_ranges(&self.meta.location, &ranges).await {
            Ok(chunks) => Ok(chunks),
            Err(e) => Err(ParquetError::General(format!(
                "AsyncChunkReader::get_byte_ranges error: {}",
                e
            ))),
        }
    }
    .boxed()
}

fn get_metadata(
&mut self,
) -> BoxFuture<'_, parquet::errors::Result<Arc<ParquetMetaData>>> {
Expand Down
20 changes: 19 additions & 1 deletion datafusion/core/tests/path_partition.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,10 @@ use datafusion::{
use datafusion_common::ScalarValue;
use futures::stream::BoxStream;
use futures::{stream, StreamExt};
use object_store::{path::Path, GetResult, ListResult, ObjectMeta, ObjectStore};
use object_store::{
path::Path, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore,
};
use tokio::io::AsyncWrite;

#[tokio::test]
async fn parquet_distinct_partition_col() -> Result<()> {
Expand Down Expand Up @@ -516,6 +519,21 @@ impl ObjectStore for MirroringObjectStore {
unimplemented!()
}

/// Multipart uploads are not implemented for this test store; calling
/// this panics via `unimplemented!()`.
async fn put_multipart(
    &self,
    _location: &Path,
) -> object_store::Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
    // Present only to satisfy the object_store 0.4 trait surface.
    unimplemented!()
}

/// Aborting a multipart upload is not implemented for this test store;
/// calling this panics via `unimplemented!()`.
async fn abort_multipart(
    &self,
    _location: &Path,
    _multipart_id: &MultipartId,
) -> object_store::Result<()> {
    // Present only to satisfy the object_store 0.4 trait surface.
    unimplemented!()
}

async fn get(&self, location: &Path) -> object_store::Result<GetResult> {
self.files.iter().find(|x| *x == location.as_ref()).unwrap();
let path = std::path::PathBuf::from(&self.mirrored_file);
Expand Down