#include <cudf/io/detail/parquet.hpp>
#include <cudf/utilities/export.hpp>

namespace CUDF_EXPORT cudf {

std::optional<std::vector<std::string>> _columns;
std::vector<std::vector<size_type>> _row_groups;
int64_t _skip_rows = 0;
std::optional<size_type> _num_rows;
std::optional<std::reference_wrapper<ast::expression const>> _filter;
bool _convert_strings_to_categories = false;
bool _use_pandas_metadata = true;
bool _use_arrow_schema = true;
data_type _timestamp_type{type_id::EMPTY};
std::optional<std::vector<reader_column_schema>> _reader_column_schema;
return _convert_strings_to_categories;
return _reader_column_schema;
[[nodiscard]] std::optional<size_type> const& get_num_rows() const { return _num_rows; }
[[nodiscard]] auto const& get_columns() const { return _columns; }
[[nodiscard]] auto const& get_filter() const { return _filter; }
void set_columns(std::vector<std::string> col_names) { _columns = std::move(col_names); }
_reader_column_schema = std::move(val);
options._columns = std::move(col_names);
options._convert_strings_to_categories = val;
options._use_pandas_metadata = val;
options._use_arrow_schema = val;
options._reader_column_schema = std::move(val);
options._timestamp_type = type;
std::size_t chunk_read_limit,
std::size_t chunk_read_limit,
std::size_t pass_read_limit,
std::unique_ptr<cudf::io::parquet::detail::chunked_reader> reader;
bool is_descending{false};
bool is_nulls_first{true};
std::optional<table_input_metadata> _metadata;
std::vector<std::map<std::string, std::string>> _user_data;
bool _write_timestamps_as_int96 = false;
bool _write_timestamps_as_UTC = true;
bool _write_arrow_schema = false;
std::optional<size_type> _max_page_fragment_size;
std::shared_ptr<writer_compression_statistics> _compression_stats;
bool _v2_page_headers = false;
std::optional<std::vector<sorting_column>> _sorting_columns;
return std::min(_max_page_size_bytes, get_row_group_size_bytes());
return std::min(_max_page_size_rows, get_row_group_size_rows());
return _column_index_truncate_length;
return _compression_stats;
template <class BuilderT, class OptionsT>
std::shared_ptr<writer_compression_statistics> const& comp_stats);
operator OptionsT&&();
std::vector<partition_info> _partitions;
std::vector<std::string> _column_chunks_file_paths;
[[nodiscard]] std::vector<partition_info> const& get_partitions() const { return _partitions; }
return _column_chunks_file_paths;
parquet_writer_options> {
std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list);
chunked_parquet_writer_options> {
std::vector<partition_info> const& partitions = {});
std::unique_ptr<std::vector<uint8_t>> close(std::vector<std::string> const& column_chunks_file_paths = {});
std::unique_ptr<parquet::detail::writer> writer;
Indicator for the logical data type of an element in a column.
The chunked parquet reader class, used to read a Parquet file iteratively into a series of tables, chunk by chunk.
table_with_metadata read_chunk() const
Read a chunk of rows in the given Parquet file.
bool has_next() const
Checks whether there is any data in the given file that has not yet been read.
chunked_parquet_reader(std::size_t chunk_read_limit, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=rmm::mr::get_current_device_resource())
Constructor for chunked reader.
chunked_parquet_reader(std::size_t chunk_read_limit, std::size_t pass_read_limit, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=rmm::mr::get_current_device_resource())
Constructor for chunked reader.
~chunked_parquet_reader()
Destructor, destroying the internal reader instance.
chunked_parquet_reader()
Default constructor; this should never be used.
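As a usage sketch for the chunked reader (the file name and the 1 GiB chunk limit below are illustrative values, not library defaults):

#include <cudf/io/parquet.hpp>

void read_in_chunks()
{
  auto const options =
    cudf::io::parquet_reader_options::builder(cudf::io::source_info{"large.parquet"}).build();

  // Limit each returned table chunk to roughly 1 GiB of output data.
  cudf::io::chunked_parquet_reader reader(1024 * 1024 * 1024, options);

  while (reader.has_next()) {
    auto chunk = reader.read_chunk();  // cudf::io::table_with_metadata
    // ... process chunk.tbl ...
  }
}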
Class to build chunked_parquet_writer_options.
chunked_parquet_writer_options_builder()=default
Default constructor.
chunked_parquet_writer_options_builder(sink_info const &sink)
Constructor from sink.
Settings for parquet_chunked_writer.
static chunked_parquet_writer_options_builder builder(sink_info const &sink)
Creates a builder to build chunked_parquet_writer_options.
chunked_parquet_writer_options()=default
Default constructor.
Chunked parquet writer class to handle options and write tables in chunks.
std::unique_ptr< std::vector< uint8_t > > close(std::vector< std::string > const &column_chunks_file_paths={})
Finishes the chunked/streamed write process.
parquet_chunked_writer()
Default constructor; this should never be used. It is added just to satisfy Cython.
~parquet_chunked_writer()
Default destructor. This is added to not leak detail API.
std::unique_ptr< parquet::detail::writer > writer
Unique pointer to impl writer class.
parquet_chunked_writer & write(table_view const &table, std::vector< partition_info > const &partitions={})
Writes table to output.
parquet_chunked_writer(chunked_parquet_writer_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream())
Constructor with chunked writer options.
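A minimal sketch of the chunked write path; the output path is illustrative and the two table_view arguments are placeholders supplied by the caller:

#include <cudf/io/parquet.hpp>
#include <cudf/table/table_view.hpp>

void write_in_chunks(cudf::table_view const& part1, cudf::table_view const& part2)
{
  auto const options =
    cudf::io::chunked_parquet_writer_options::builder(cudf::io::sink_info{"out.parquet"}).build();

  cudf::io::parquet_chunked_writer writer(options);
  writer.write(part1);
  writer.write(part2);

  // close() finalizes the file footer; the returned metadata blob is only
  // populated when column_chunks_file_paths is passed (assumption of this sketch).
  auto metadata = writer.close();
}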
Builds parquet_reader_options to use for read_parquet().
parquet_reader_options_builder & use_arrow_schema(bool val)
Sets to enable/disable use of arrow schema to read.
parquet_reader_options_builder(source_info src)
Constructor from source info.
parquet_reader_options_builder & skip_rows(int64_t val)
Sets number of rows to skip.
parquet_reader_options_builder & columns(std::vector< std::string > col_names)
Sets names of the columns to be read.
parquet_reader_options_builder & timestamp_type(data_type type)
Sets the timestamp_type used to cast timestamp columns.
parquet_reader_options_builder & use_pandas_metadata(bool val)
Sets to enable/disable use of pandas metadata to read.
parquet_reader_options_builder()=default
Default constructor.
parquet_reader_options_builder & row_groups(std::vector< std::vector< size_type >> row_groups)
Sets vector of individual row groups to read.
parquet_reader_options_builder & set_column_schema(std::vector< reader_column_schema > val)
Sets reader metadata.
parquet_reader_options && build()
Moves the parquet_reader_options member once it's built.
parquet_reader_options_builder & filter(ast::expression const &filter)
Sets AST based filter for predicate pushdown.
parquet_reader_options_builder & num_rows(size_type val)
Sets number of rows to read.
parquet_reader_options_builder & convert_strings_to_categories(bool val)
Sets enable/disable conversion of strings to categories.
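Putting the builder together, a sketch (the file name, column names, and row counts are illustrative):

#include <cudf/io/parquet.hpp>

auto make_reader_options()
{
  return cudf::io::parquet_reader_options::builder(cudf::io::source_info{"data.parquet"})
    .columns({"id", "name"})     // read only these columns
    .skip_rows(100)              // skip the first 100 rows
    .num_rows(1000)              // then read at most 1000 rows
    .use_pandas_metadata(true)   // honor pandas index metadata if present
    .build();
}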
Settings for read_parquet().
data_type get_timestamp_type() const
Returns timestamp type used to cast timestamp columns.
parquet_reader_options()=default
Default constructor.
static parquet_reader_options_builder builder(source_info src)
Creates a parquet_reader_options_builder which will build parquet_reader_options.
void set_skip_rows(int64_t val)
Sets number of rows to skip.
void set_columns(std::vector< std::string > col_names)
Sets names of the columns to be read.
void enable_convert_strings_to_categories(bool val)
Sets to enable/disable conversion of strings to categories.
std::optional< std::vector< reader_column_schema > > get_column_schema() const
Returns optional tree of metadata.
source_info const & get_source() const
Returns source info.
auto const & get_row_groups() const
Returns list of individual row groups to be read.
std::optional< size_type > const & get_num_rows() const
Returns number of rows to read.
void set_row_groups(std::vector< std::vector< size_type >> row_groups)
Sets vector of individual row groups to read.
void set_num_rows(size_type val)
Sets number of rows to read.
auto const & get_columns() const
Returns names of columns to be read, if set.
void set_timestamp_type(data_type type)
Sets timestamp_type used to cast timestamp columns.
bool is_enabled_convert_strings_to_categories() const
Returns true if strings should be converted to categories.
void enable_use_pandas_metadata(bool val)
Sets to enable/disable use of pandas metadata to read.
void enable_use_arrow_schema(bool val)
Sets to enable/disable use of arrow schema to read.
bool is_enabled_use_pandas_metadata() const
Returns true if pandas metadata should be used while reading.
void set_column_schema(std::vector< reader_column_schema > val)
Sets reader column schema.
bool is_enabled_use_arrow_schema() const
Returns true if arrow schema should be used while reading.
void set_filter(ast::expression const &filter)
Sets AST based filter for predicate pushdown.
auto const & get_filter() const
Returns AST based filter for predicate pushdown.
int64_t get_skip_rows() const
Returns number of rows to skip from the start.
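A sketch of the setter API together with an AST filter for predicate pushdown; the column name "price" and the int32 threshold are illustrative and must match the actual column type. Because the options only store a reference to the filter expression, the expression objects must outlive the read call:

#include <cudf/ast/expressions.hpp>
#include <cudf/io/parquet.hpp>
#include <cudf/scalar/scalar.hpp>

cudf::io::table_with_metadata read_filtered()
{
  auto options =
    cudf::io::parquet_reader_options::builder(cudf::io::source_info{"data.parquet"}).build();

  // Keep only rows where price < 100; the reader may also use this to skip row groups.
  cudf::ast::column_name_reference col{"price"};
  cudf::numeric_scalar<int32_t> value{100};
  cudf::ast::literal lit{value};
  cudf::ast::operation filter{cudf::ast::ast_operator::LESS, col, lit};
  options.set_filter(filter);

  return cudf::io::read_parquet(options);
}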
Base settings for write_parquet() and parquet_chunked_writer.
void enable_utc_timestamps(bool val)
Sets preference for writing timestamps as UTC. Write timestamps as UTC if set to true.
void enable_write_v2_headers(bool val)
Sets preference for V2 page headers. Write V2 page headers if set to true.
auto const & get_sorting_columns() const
Returns the sorting_columns.
auto get_row_group_size_bytes() const
Returns maximum row group size, in bytes.
bool is_enabled_int96_timestamps() const
Returns true if timestamps will be written as INT96.
void set_metadata(table_input_metadata metadata)
Sets metadata.
void set_row_group_size_rows(size_type size_rows)
Sets the maximum row group size, in rows.
parquet_writer_options_base(sink_info sink)
Constructor from sink.
void set_stats_level(statistics_freq sf)
Sets the level of statistics.
auto get_row_group_size_rows() const
Returns maximum row group size, in rows.
parquet_writer_options_base()=default
Default constructor.
void set_max_page_size_bytes(size_t size_bytes)
Sets the maximum uncompressed page size, in bytes.
void set_sorting_columns(std::vector< sorting_column > sorting_columns)
Sets sorting columns.
auto is_enabled_write_arrow_schema() const
Returns true if arrow schema will be written.
auto is_enabled_write_v2_headers() const
Returns true if V2 page headers should be written.
void set_dictionary_policy(dictionary_policy policy)
Sets the policy for dictionary use.
auto get_max_page_size_bytes() const
Returns the maximum uncompressed page size, in bytes.
void set_max_dictionary_size(size_t size_bytes)
Sets the maximum dictionary size, in bytes.
compression_type get_compression() const
Returns compression format used.
auto get_max_dictionary_size() const
Returns maximum dictionary size, in bytes.
void set_compression(compression_type compression)
Sets compression type.
dictionary_policy get_dictionary_policy() const
Returns policy for dictionary use.
void set_compression_statistics(std::shared_ptr< writer_compression_statistics > comp_stats)
Sets the pointer to the output compression statistics.
std::shared_ptr< writer_compression_statistics > get_compression_statistics() const
Returns a shared pointer to the user-provided compression statistics.
void set_max_page_size_rows(size_type size_rows)
Sets the maximum page size, in rows.
auto get_max_page_fragment_size() const
Returns maximum page fragment size, in rows.
void set_key_value_metadata(std::vector< std::map< std::string, std::string >> metadata)
Sets metadata.
void set_max_page_fragment_size(size_type size_rows)
Sets the maximum page fragment size, in rows.
void enable_write_arrow_schema(bool val)
Sets preference for writing arrow schema. Write arrow schema if set to true.
auto is_enabled_utc_timestamps() const
Returns true if timestamps will be written as UTC.
void set_row_group_size_bytes(size_t size_bytes)
Sets the maximum row group size, in bytes.
void enable_int96_timestamps(bool req)
Sets timestamp writing preferences. INT96 timestamps will be written if true and TIMESTAMP_MICROS will be written otherwise.
statistics_freq get_stats_level() const
Returns level of statistics requested in output file.
std::vector< std::map< std::string, std::string > > const & get_key_value_metadata() const
Returns Key-Value footer metadata information.
auto const & get_metadata() const
Returns associated metadata.
auto get_max_page_size_rows() const
Returns maximum page size, in rows.
auto get_column_index_truncate_length() const
Returns maximum length of min or max values in column index, in bytes.
void set_column_index_truncate_length(int32_t size_bytes)
Sets the maximum length of min or max values in column index, in bytes.
sink_info const & get_sink() const
Returns sink info.
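A sketch of the setter interface on a writer options object; the sizes and compression choice below are illustrative, not the library defaults:

#include <cudf/io/parquet.hpp>

void configure(cudf::io::parquet_writer_options& options)
{
  options.set_compression(cudf::io::compression_type::ZSTD);
  options.set_stats_level(cudf::io::statistics_freq::STATISTICS_ROWGROUP);
  options.set_row_group_size_bytes(64 * 1024 * 1024);  // 64 MB row groups
  options.set_max_page_size_rows(10'000);
  options.enable_write_v2_headers(true);
}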
Base class for Parquet options builders.
BuilderT & compression(compression_type compression)
Sets compression type.
BuilderT & key_value_metadata(std::vector< std::map< std::string, std::string >> metadata)
Sets Key-Value footer metadata.
OptionsT & get_options()
Return reference to the options object being built.
BuilderT & utc_timestamps(bool enabled)
Set to true if timestamps are to be written as UTC.
BuilderT & max_dictionary_size(size_t val)
Sets the maximum dictionary size, in bytes.
BuilderT & max_page_size_bytes(size_t val)
Sets the maximum uncompressed page size, in bytes.
OptionsT && build()
Moves the options member once it's built.
BuilderT & stats_level(statistics_freq sf)
Sets the level of statistics.
BuilderT & column_index_truncate_length(int32_t val)
Sets the desired maximum size in bytes for min and max values in the column index.
BuilderT & compression_statistics(std::shared_ptr< writer_compression_statistics > const &comp_stats)
Sets the pointer to the output compression statistics.
BuilderT & metadata(table_input_metadata metadata)
Sets metadata.
BuilderT & dictionary_policy(enum dictionary_policy val)
Sets the policy for dictionary use.
parquet_writer_options_builder_base(OptionsT options)
Constructor from options.
BuilderT & int96_timestamps(bool enabled)
Sets whether int96 timestamps are written or not.
BuilderT & row_group_size_bytes(size_t val)
Sets the maximum row group size, in bytes.
BuilderT & sorting_columns(std::vector< sorting_column > sorting_columns)
Sets column sorting metadata.
BuilderT & write_arrow_schema(bool enabled)
Set to true if arrow schema is to be written.
parquet_writer_options_builder_base()=default
Default constructor.
BuilderT & write_v2_headers(bool enabled)
Set to true if V2 page headers are to be written.
BuilderT & max_page_fragment_size(size_type val)
Sets the maximum page fragment size, in rows.
BuilderT & row_group_size_rows(size_type val)
Sets the maximum number of rows in output row groups.
BuilderT & max_page_size_rows(size_type val)
Sets the maximum page size, in rows. Counts only top-level rows, ignoring any nesting.
Class to build parquet_writer_options.
parquet_writer_options_builder(sink_info const &sink, table_view const &table)
Constructor from sink and table.
parquet_writer_options_builder()=default
Default constructor.
parquet_writer_options_builder & partitions(std::vector< partition_info > partitions)
Sets partitions in parquet_writer_options.
parquet_writer_options_builder & column_chunks_file_paths(std::vector< std::string > file_paths)
Sets column chunks file path to be set in the raw output metadata.
Settings for write_parquet().
void set_partitions(std::vector< partition_info > partitions)
Sets partitions.
static parquet_writer_options_builder builder(sink_info const &sink, table_view const &table)
Create builder to create parquet_writer_options.
parquet_writer_options()=default
Default constructor.
std::vector< std::string > const & get_column_chunks_file_paths() const
Returns Column chunks file paths to be set in the raw output metadata.
table_view get_table() const
Returns table_view.
void set_column_chunks_file_paths(std::vector< std::string > file_paths)
Sets column chunks file path to be set in the raw output metadata.
static parquet_writer_options_builder builder()
Create builder to create parquet_writer_options.
std::vector< partition_info > const & get_partitions() const
Returns partitions.
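A minimal end-to-end write sketch using the builder; the output path and compression choice are illustrative:

#include <cudf/io/parquet.hpp>
#include <cudf/table/table_view.hpp>

void write_table(cudf::table_view const& table)
{
  auto const options =
    cudf::io::parquet_writer_options::builder(cudf::io::sink_info{"out.parquet"}, table)
      .compression(cudf::io::compression_type::SNAPPY)
      .stats_level(cudf::io::statistics_freq::STATISTICS_ROWGROUP)
      .build();

  cudf::io::write_parquet(options);
}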
A set of cudf::column_view's of the same size.
A set of cudf::column's of the same size.
rmm::cuda_stream_view const get_default_stream()
Get the current default stream.
constexpr size_type default_row_group_size_rows
1 million rows per row group
constexpr int32_t default_column_index_truncate_length
truncate to 64 bytes
constexpr size_t default_row_group_size_bytes
128MB per row group
constexpr size_type default_max_page_fragment_size
5000 rows per page fragment
constexpr size_t default_max_dictionary_size
1MB dictionary size
table_with_metadata read_parquet(parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=rmm::mr::get_current_device_resource())
Reads a Parquet dataset into a set of columns.
constexpr size_t default_max_page_size_bytes
512KB per page
constexpr size_type default_max_page_size_rows
20k rows per page
compression_type
Compression algorithms.
statistics_freq
Column statistics granularity type for parquet/orc writers.
dictionary_policy
Control use of dictionary encoding for parquet writer.
STATISTICS_ROWGROUP
Per-Rowgroup column statistics.
ADAPTIVE
Use dictionary when it will not impact compression.
std::unique_ptr< std::vector< uint8_t > > merge_row_group_metadata(std::vector< std::unique_ptr< std::vector< uint8_t >>> const &metadata_list)
Merges multiple raw metadata blobs that were previously created by write_parquet into a single metadata blob.
std::unique_ptr< std::vector< uint8_t > > write_parquet(parquet_writer_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream())
Writes a set of columns to parquet format.
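A sketch combining the two calls above: each write_parquet call produces a per-file metadata blob (only when column_chunks_file_paths is set, as assumed here), and merge_row_group_metadata folds them into a single blob. File names are illustrative:

#include <cudf/io/parquet.hpp>
#include <cudf/table/table_view.hpp>

#include <cstdint>
#include <memory>
#include <vector>

std::unique_ptr<std::vector<uint8_t>> write_and_merge(cudf::table_view const& t1,
                                                      cudf::table_view const& t2)
{
  std::vector<std::unique_ptr<std::vector<uint8_t>>> metadata_list;

  auto opts1 =
    cudf::io::parquet_writer_options::builder(cudf::io::sink_info{"part1.parquet"}, t1)
      .column_chunks_file_paths({"part1.parquet"})
      .build();
  metadata_list.push_back(cudf::io::write_parquet(opts1));

  auto opts2 =
    cudf::io::parquet_writer_options::builder(cudf::io::sink_info{"part2.parquet"}, t2)
      .column_chunks_file_paths({"part2.parquet"})
      .build();
  metadata_list.push_back(cudf::io::write_parquet(opts2));

  // Merge the per-file footers into a single metadata blob.
  return cudf::io::merge_row_group_metadata(metadata_list);
}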
cuda::mr::async_resource_ref< cuda::mr::device_accessible > device_async_resource_ref
device_memory_resource * get_current_device_resource()
int32_t size_type
Row index type for columns and tables.
cuDF-IO API type definitions
A generic expression that can be evaluated to return a value.
Destination information for write interfaces.
Struct used to describe column sorting metadata.
Source information for read interfaces.
Class definitions for (mutable)_table_view
Type declarations for libcudf.