parquet.hpp
Go to the documentation of this file.
1 /*
2  * SPDX-FileCopyrightText: Copyright (c) 2020-2026, NVIDIA CORPORATION.
3  * SPDX-License-Identifier: Apache-2.0
4  */
5 
6 #pragma once
7 
9 #include <cudf/io/detail/parquet.hpp>
10 #include <cudf/io/types.hpp>
12 #include <cudf/types.hpp>
13 #include <cudf/utilities/export.hpp>
15 
16 #include <memory>
17 #include <optional>
18 #include <string>
19 #include <utility>
20 #include <vector>
21 
22 namespace CUDF_EXPORT cudf {
23 namespace io {
30 constexpr size_t default_row_group_size_bytes =
31  std::numeric_limits<size_t>::max();
32 constexpr size_type default_row_group_size_rows = 1'000'000;
33 constexpr size_t default_max_page_size_bytes = 512 * 1024;
35 constexpr int32_t default_column_index_truncate_length = 64;
36 constexpr size_t default_max_dictionary_size = 1024 * 1024;
38 
48 [[nodiscard]] bool is_supported_read_parquet(compression_type compression);
49 
59 [[nodiscard]] bool is_supported_write_parquet(compression_type compression);
60 
62 
67  source_info _source;
68 
69  // Path in schema of column names to read; `nullopt` is all
70  std::optional<std::vector<std::string>> _column_names;
71  // Indices of top-level columns to read; `nullopt` is all (cannot be used alongside
72  // `_column_names`)
73  std::optional<std::vector<cudf::size_type>> _column_indices;
74 
75  // List of individual row groups to read (ignored if empty)
76  std::vector<std::vector<size_type>> _row_groups;
77  // Number of rows to skip from the start; Parquet stores the number of rows as int64_t
78  int64_t _skip_rows = 0;
79  // Number of rows to read; `nullopt` is all
80  std::optional<int64_t> _num_rows;
81 
82  // Read row groups that start at or after this byte offset into the source
83  size_t _skip_bytes = 0;
84  // Read row groups that start before _num_bytes bytes after _skip_bytes into the source
85  std::optional<size_t> _num_bytes;
86 
87  // Predicate filter as AST to filter output rows.
88  std::optional<std::reference_wrapper<ast::expression const>> _filter;
89 
90  // Whether to store string data as categorical type
91  bool _convert_strings_to_categories = false;
92  // Whether to use PANDAS metadata to load columns
93  bool _use_pandas_metadata = true;
94  // Whether to read and use ARROW schema
95  bool _use_arrow_schema = true;
96  // Whether to allow reading matching select columns from mismatched Parquet files.
97  bool _allow_mismatched_pq_schemas = false;
98  // Whether to ignore non-existent projected columns
99  bool _ignore_missing_columns = true;
100  // Cast timestamp columns to a specific type
101  data_type _timestamp_type{type_id::EMPTY};
102  // Cast decimal columns to a specific width
103  type_id _decimal_width{type_id::EMPTY};
104  // Whether to use JIT compilation for filtering
105  bool _use_jit_filter = false;
106  // Whether column name matching is case sensitive. In case of multiple
107  // case-insensitive matches, the first matched column is selected
108  bool _case_sensitive_names = true;
109 
110  std::optional<std::vector<reader_column_schema>> _reader_column_schema;
111 
117  explicit parquet_reader_options(source_info src) : _source{std::move(src)} {}
118 
120 
121  public:
128  explicit parquet_reader_options() = default;
129 
138 
144  [[nodiscard]] source_info const& get_source() const { return _source; }
145 
151  [[nodiscard]] bool is_enabled_convert_strings_to_categories() const
152  {
153  return _convert_strings_to_categories;
154  }
155 
161  [[nodiscard]] bool is_enabled_use_pandas_metadata() const { return _use_pandas_metadata; }
162 
168  [[nodiscard]] bool is_enabled_use_arrow_schema() const { return _use_arrow_schema; }
169 
177  [[nodiscard]] bool is_enabled_allow_mismatched_pq_schemas() const
178  {
179  return _allow_mismatched_pq_schemas;
180  }
181 
189  [[nodiscard]] bool is_enabled_ignore_missing_columns() const { return _ignore_missing_columns; }
190 
196  [[nodiscard]] std::optional<std::vector<reader_column_schema>> get_column_schema() const
197  {
198  return _reader_column_schema;
199  }
200 
206  [[nodiscard]] int64_t get_skip_rows() const { return _skip_rows; }
207 
214  [[nodiscard]] std::optional<int64_t> const& get_num_rows() const { return _num_rows; }
215 
222  [[nodiscard]] size_t get_skip_bytes() const { return _skip_bytes; }
223 
230  [[nodiscard]] std::optional<size_t> const& get_num_bytes() const { return _num_bytes; }
231 
237  [[nodiscard]] [[deprecated("Use `get_column_names` instead.")]] auto const& get_columns() const
238  {
239  return _column_names;
240  }
241 
247  [[nodiscard]] auto const& get_column_names() const { return _column_names; }
248 
254  [[nodiscard]] auto const& get_column_indices() const { return _column_indices; }
255 
261  [[nodiscard]] auto const& get_row_groups() const { return _row_groups; }
262 
268  [[nodiscard]] auto const& get_filter() const { return _filter; }
269 
275  [[nodiscard]] data_type get_timestamp_type() const { return _timestamp_type; }
276 
282  [[nodiscard]] type_id get_decimal_width() const { return _decimal_width; }
283 
289  [[nodiscard]] bool is_enabled_use_jit_filter() const { return _use_jit_filter; }
290 
299  [[nodiscard]] bool is_enabled_case_sensitive_names() const { return _case_sensitive_names; }
300 
306  void set_source(source_info src) { _source = std::move(src); }
307 
330  [[deprecated("Use `set_column_names` instead.")]] void set_columns(
331  std::vector<std::string> column_names)
332  {
333  set_column_names(std::move(column_names));
334  }
335 
// Sets the names of columns to be read from all input sources.
// Guard: name-based and index-based column selection are mutually exclusive —
// CUDF_EXPECTS rejects the call if _column_indices was already set.
356  void set_column_names(std::vector<std::string> column_names)
357  {
358  CUDF_EXPECTS(not _column_indices.has_value(),
359  "Cannot select columns by indices and names simultaneously");
360  _column_names = std::move(column_names);
361  }
362 
// Sets the indices of top-level columns to be read from all input sources.
// Guard: mirror of set_column_names — CUDF_EXPECTS rejects the call if
// _column_names was already set, so only one selection mode is ever active.
374  void set_column_indices(std::vector<cudf::size_type> col_indices)
375  {
376  CUDF_EXPECTS(not _column_names.has_value(),
377  "Cannot select columns by indices and names simultaneously");
378  _column_indices = std::move(col_indices);
379  }
380 
398  void set_row_groups(std::vector<std::vector<size_type>> row_groups);
399 
430  void set_filter(ast::expression const& filter) { _filter = filter; }
431 
437  void enable_convert_strings_to_categories(bool val) { _convert_strings_to_categories = val; }
438 
444  void enable_use_pandas_metadata(bool val) { _use_pandas_metadata = val; }
445 
451  void enable_use_arrow_schema(bool val) { _use_arrow_schema = val; }
452 
460  void enable_allow_mismatched_pq_schemas(bool val) { _allow_mismatched_pq_schemas = val; }
461 
468  void enable_ignore_missing_columns(bool val) { _ignore_missing_columns = val; }
469 
476  void set_column_schema(std::vector<reader_column_schema> val)
477  {
478  _reader_column_schema = std::move(val);
479  }
480 
486  void set_skip_rows(int64_t val);
487 
496  void set_num_rows(int64_t val);
497 
503  void set_skip_bytes(size_t val);
504 
510  void set_num_bytes(size_t val);
511 
517  void set_timestamp_type(data_type type) { _timestamp_type = type; }
518 
525  void set_decimal_width(type_id width) { _decimal_width = width; }
526 
535  void enable_case_sensitive_names(bool val) { _case_sensitive_names = val; }
536 };
537 
542  parquet_reader_options options;
543 
544  public:
552 
558  explicit parquet_reader_options_builder(source_info src) : options{std::move(src)} {}
559 
568  [[deprecated("Use `column_names` instead.")]] parquet_reader_options_builder& columns(
569  std::vector<std::string> column_names)
570  {
571  return this->column_names(std::move(column_names));
572  }
573 
580  parquet_reader_options_builder& column_names(std::vector<std::string> column_names)
581  {
582  options.set_column_names(std::move(column_names));
583  return *this;
584  }
585 
592  parquet_reader_options_builder& column_indices(std::vector<cudf::size_type> col_indices)
593  {
594  options.set_column_indices(std::move(col_indices));
595  return *this;
596  }
597 
604  parquet_reader_options_builder& row_groups(std::vector<std::vector<size_type>> row_groups)
605  {
606  options.set_row_groups(std::move(row_groups));
607  return *this;
608  }
609 
615  {
616  options.set_filter(filter);
617  return *this;
618  }
619 
627  {
628  options._convert_strings_to_categories = val;
629  return *this;
630  }
631 
639  {
640  options._use_pandas_metadata = val;
641  return *this;
642  }
643 
651  {
652  options._use_arrow_schema = val;
653  return *this;
654  }
655 
666  {
667  options._allow_mismatched_pq_schemas = val;
668  return *this;
669  }
670 
679  {
680  options._ignore_missing_columns = val;
681  return *this;
682  }
683 
690  parquet_reader_options_builder& set_column_schema(std::vector<reader_column_schema> val)
691  {
692  options._reader_column_schema = std::move(val);
693  return *this;
694  }
695 
703  {
704  options.set_skip_rows(val);
705  return *this;
706  }
707 
718  {
719  options.set_num_rows(val);
720  return *this;
721  }
722 
730  {
731  options.set_skip_bytes(val);
732  return *this;
733  }
734 
742  {
743  options.set_num_bytes(val);
744  return *this;
745  }
746 
754  {
755  options._timestamp_type = type;
756  return *this;
757  }
758 
767  {
768  options._decimal_width = width;
769  return *this;
770  }
771 
779  {
780  options._use_jit_filter = use_jit_filter;
781  return *this;
782  }
783 
794  {
795  options._case_sensitive_names = val;
796  return *this;
797  }
798 
802  operator parquet_reader_options&&() { return std::move(options); }
803 
811  parquet_reader_options&& build() { return std::move(options); }
812 };
813 
832  parquet_reader_options const& options,
835 
858  std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
859  std::vector<parquet::FileMetaData>&& parquet_metadatas,
860  parquet_reader_options const& options,
863 
874  public:
882 
897  std::size_t chunk_read_limit,
898  parquet_reader_options const& options,
901 
919  std::size_t chunk_read_limit,
920  std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
921  std::vector<parquet::FileMetaData>&& parquet_metadatas,
922  parquet_reader_options const& options,
925 
946  std::size_t chunk_read_limit,
947  std::size_t pass_read_limit,
948  parquet_reader_options const& options,
951 
975  std::size_t chunk_read_limit,
976  std::size_t pass_read_limit,
977  std::vector<std::unique_ptr<cudf::io::datasource>>&& sources,
978  std::vector<parquet::FileMetaData>&& parquet_metadatas,
979  parquet_reader_options const& options,
982 
991 
997  [[nodiscard]] bool has_next() const;
998 
1010  [[nodiscard]] table_with_metadata read_chunk() const;
1011 
1012  private:
1013  std::unique_ptr<cudf::io::parquet::detail::chunked_reader> reader;
1014 };
1015  // end of group
1027  int column_idx{};
1028  bool is_descending{false};
1029  bool is_nulls_first{true};
1030 };
1031 
1036  // Specify the sink to use for writer output
1037  sink_info _sink;
1038  // Specify the compression format to use
1039  compression_type _compression = compression_type::SNAPPY;
1040  // Specify the level of statistics in the output file
1042  // Optional associated metadata
1043  std::optional<table_input_metadata> _metadata;
1044  // Optional footer key_value_metadata
1045  std::vector<std::map<std::string, std::string>> _user_data;
1046  // Parquet writer can write INT96 or TIMESTAMP_MICROS. Defaults to TIMESTAMP_MICROS.
1047  // If true then overrides any per-column setting in _metadata.
1048  bool _write_timestamps_as_int96 = false;
1049  // Parquet writer can write timestamps as UTC
1050  // Defaults to true because libcudf timestamps are implicitly UTC
1051  bool _write_timestamps_as_UTC = true;
1052  // Whether to write ARROW schema
1053  bool _write_arrow_schema = false;
1054  // Maximum size of each row group (unless smaller than a single page)
1055  size_t _row_group_size_bytes = default_row_group_size_bytes;
1056  // Maximum number of rows in row group (unless smaller than a single page)
1057  size_type _row_group_size_rows = default_row_group_size_rows;
1058  // Maximum size of each page (uncompressed)
1059  size_t _max_page_size_bytes = default_max_page_size_bytes;
1060  // Maximum number of rows in a page
1061  size_type _max_page_size_rows = default_max_page_size_rows;
1062  // Maximum size of min or max values in column index
1063  int32_t _column_index_truncate_length = default_column_index_truncate_length;
1064  // When to use dictionary encoding for data
1065  dictionary_policy _dictionary_policy = dictionary_policy::ADAPTIVE;
1066  // Maximum size of column chunk dictionary (in bytes)
1067  size_t _max_dictionary_size = default_max_dictionary_size;
1068  // Maximum number of rows in a page fragment
1069  std::optional<size_type> _max_page_fragment_size;
1070  // Optional compression statistics
1071  std::shared_ptr<writer_compression_statistics> _compression_stats;
1072  // write V2 page headers?
1073  bool _v2_page_headers = false;
1074  // enable per-page compression decision for V2?
1075  bool _page_level_compression = false;
1076  // Which columns in _table are used for sorting
1077  std::optional<std::vector<sorting_column>> _sorting_columns;
1078 
1079  protected:
1085  explicit parquet_writer_options_base(sink_info sink) : _sink(std::move(sink)) {}
1086 
1087  public:
1094 
1100  [[nodiscard]] sink_info const& get_sink() const { return _sink; }
1101 
1107  [[nodiscard]] compression_type get_compression() const { return _compression; }
1108 
1114  [[nodiscard]] statistics_freq get_stats_level() const { return _stats_level; }
1115 
1121  [[nodiscard]] auto const& get_metadata() const { return _metadata; }
1122 
1128  [[nodiscard]] std::vector<std::map<std::string, std::string>> const& get_key_value_metadata()
1129  const
1130  {
1131  return _user_data;
1132  }
1133 
1139  [[nodiscard]] bool is_enabled_int96_timestamps() const { return _write_timestamps_as_int96; }
1140 
1146  [[nodiscard]] auto is_enabled_utc_timestamps() const { return _write_timestamps_as_UTC; }
1147 
1153  [[nodiscard]] auto is_enabled_write_arrow_schema() const { return _write_arrow_schema; }
1154 
1160  [[nodiscard]] auto get_row_group_size_bytes() const { return _row_group_size_bytes; }
1161 
1167  [[nodiscard]] auto get_row_group_size_rows() const { return _row_group_size_rows; }
1168 
// Returns the effective maximum uncompressed page size in bytes.
// Clamped with std::min so a page can never be configured larger than the
// row group that contains it.
1176  [[nodiscard]] auto get_max_page_size_bytes() const
1177  {
1178  return std::min(_max_page_size_bytes, get_row_group_size_bytes());
1179  }
1180 
// Returns the effective maximum page size in rows.
// Clamped with std::min so a page can never hold more rows than its row group.
1188  [[nodiscard]] auto get_max_page_size_rows() const
1189  {
1190  return std::min(_max_page_size_rows, get_row_group_size_rows());
1191  }
1192 
1198  [[nodiscard]] auto get_column_index_truncate_length() const
1199  {
1200  return _column_index_truncate_length;
1201  }
1202 
1208  [[nodiscard]] dictionary_policy get_dictionary_policy() const { return _dictionary_policy; }
1209 
1215  [[nodiscard]] auto get_max_dictionary_size() const { return _max_dictionary_size; }
1216 
1222  [[nodiscard]] auto get_max_page_fragment_size() const { return _max_page_fragment_size; }
1223 
1229  [[nodiscard]] std::shared_ptr<writer_compression_statistics> get_compression_statistics() const
1230  {
1231  return _compression_stats;
1232  }
1233 
1239  [[nodiscard]] auto is_enabled_write_v2_headers() const { return _v2_page_headers; }
1240 
1250  [[nodiscard]] auto is_enabled_page_level_compression() const { return _page_level_compression; }
1251 
1257  [[nodiscard]] auto const& get_sorting_columns() const { return _sorting_columns; }
1258 
1265 
1271  void set_key_value_metadata(std::vector<std::map<std::string, std::string>> metadata);
1272 
1285 
1292  void enable_int96_timestamps(bool req);
1293 
1299  void enable_utc_timestamps(bool val);
1300 
1307 
1313  void set_row_group_size_bytes(size_t size_bytes);
1314 
1321 
1327  void set_max_page_size_bytes(size_t size_bytes);
1328 
1335 
1341  void set_column_index_truncate_length(int32_t size_bytes);
1342 
1349 
1355  void set_max_dictionary_size(size_t size_bytes);
1356 
1363 
1369  void set_compression_statistics(std::shared_ptr<writer_compression_statistics> comp_stats);
1370 
1376  void enable_write_v2_headers(bool val);
1377 
1388 
1394  void set_sorting_columns(std::vector<sorting_column> sorting_columns);
1395 };
1396 
1400 template <class BuilderT, class OptionsT>
1402  OptionsT _options;
1403 
1404  protected:
1410  inline OptionsT& get_options() { return _options; }
1411 
1417  explicit parquet_writer_options_builder_base(OptionsT options);
1418 
1419  public:
1426 
1433  BuilderT& metadata(table_input_metadata metadata);
1434 
1441  BuilderT& key_value_metadata(std::vector<std::map<std::string, std::string>> metadata);
1442 
1450 
1457  BuilderT& compression(compression_type compression);
1458 
1465  BuilderT& row_group_size_bytes(size_t val);
1466 
1474 
1485  BuilderT& max_page_size_bytes(size_t val);
1486 
1495 
1509  BuilderT& column_index_truncate_length(int32_t val);
1510 
1529 
1541  BuilderT& max_dictionary_size(size_t val);
1542 
1554 
1562  std::shared_ptr<writer_compression_statistics> const& comp_stats);
1563 
1570  BuilderT& int96_timestamps(bool enabled);
1571 
1578  BuilderT& utc_timestamps(bool enabled);
1579 
1586  BuilderT& write_arrow_schema(bool enabled);
1587 
1594  BuilderT& write_v2_headers(bool enabled);
1595 
1606  BuilderT& page_level_compression(bool enabled);
1607 
1614  BuilderT& sorting_columns(std::vector<sorting_column> sorting_columns);
1615 
1619  operator OptionsT&&();
1620 
1628  OptionsT&& build();
1629 };
1630 
1632 
1637  // Sets of columns to output
1638  table_view _table;
1639  // Partitions described as {start_row, num_rows} pairs
1640  std::vector<partition_info> _partitions;
1641  // Column chunks file paths to be set in the raw output metadata. One per output file
1642  std::vector<std::string> _column_chunks_file_paths;
1643 
1645 
1652  explicit parquet_writer_options(sink_info const& sink, table_view table);
1653 
1654  public:
1661 
1671 
1678 
1684  [[nodiscard]] table_view get_table() const { return _table; }
1685 
1691  [[nodiscard]] std::vector<partition_info> const& get_partitions() const { return _partitions; }
1692 
1698  [[nodiscard]] std::vector<std::string> const& get_column_chunks_file_paths() const
1699  {
1700  return _column_chunks_file_paths;
1701  }
1702 
1709  void set_partitions(std::vector<partition_info> partitions);
1710 
1717  void set_column_chunks_file_paths(std::vector<std::string> file_paths);
1718 };
1719 
1724  : public parquet_writer_options_builder_base<parquet_writer_options_builder,
1725  parquet_writer_options> {
1726  public:
1732  explicit parquet_writer_options_builder() = default;
1733 
1741 
1749  parquet_writer_options_builder& partitions(std::vector<partition_info> partitions);
1750 
1758  parquet_writer_options_builder& column_chunks_file_paths(std::vector<std::string> file_paths);
1759 };
1760 
1779 std::unique_ptr<std::vector<uint8_t>> write_parquet(
1781 
1791 std::unique_ptr<std::vector<uint8_t>> merge_row_group_metadata(
1792  std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list);
1793 
1795 
1806 
1808 
1809  public:
1816 
1825 };
1826 
1831  : public parquet_writer_options_builder_base<chunked_parquet_writer_options_builder,
1832  chunked_parquet_writer_options> {
1833  public:
1840 
1847 };
1848 
1869  public:
1876 
1890 
1906  std::vector<partition_info> const& partitions = {});
1907 
1917  std::unique_ptr<std::vector<uint8_t>> close(
1918  std::vector<std::string> const& column_chunks_file_path = {});
1919 
1921  std::unique_ptr<parquet::detail::writer> writer;
1922 };
1923  // end of group
1925 
1926 } // namespace io
1927 } // namespace CUDF_EXPORT cudf
Indicator for the logical data type of an element in a column.
Definition: types.hpp:277
The chunked parquet reader class to read Parquet file iteratively in to a series of tables,...
Definition: parquet.hpp:873
table_with_metadata read_chunk() const
Read a chunk of rows in the given Parquet file.
bool has_next() const
Checks whether there is any data in the given file that has not yet been read.
chunked_parquet_reader(std::size_t chunk_read_limit, std::vector< std::unique_ptr< cudf::io::datasource >> &&sources, std::vector< parquet::FileMetaData > &&parquet_metadatas, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=cudf::get_current_device_resource_ref())
Constructor for chunked reader using pre-existing Parquet datasources and file metadatas.
chunked_parquet_reader(std::size_t chunk_read_limit, std::size_t pass_read_limit, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=cudf::get_current_device_resource_ref())
Constructor for chunked reader.
chunked_parquet_reader(std::size_t chunk_read_limit, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=cudf::get_current_device_resource_ref())
Constructor for chunked reader.
chunked_parquet_reader(std::size_t chunk_read_limit, std::size_t pass_read_limit, std::vector< std::unique_ptr< cudf::io::datasource >> &&sources, std::vector< parquet::FileMetaData > &&parquet_metadatas, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=cudf::get_current_device_resource_ref())
Constructor for chunked reader using pre-existing Parquet datasources and file metadatas.
~chunked_parquet_reader()
Destructor, destroying the internal reader instance.
chunked_parquet_reader()
Default constructor; this should never be used.
Class to build chunked_parquet_writer_options.
Definition: parquet.hpp:1832
chunked_parquet_writer_options_builder()=default
Default constructor.
chunked_parquet_writer_options_builder(sink_info const &sink)
Constructor from sink.
Settings for chunked_parquet_writer.
Definition: parquet.hpp:1799
static chunked_parquet_writer_options_builder builder(sink_info const &sink)
Creates a builder to build chunked_parquet_writer_options.
chunked_parquet_writer_options()=default
Default constructor.
chunked parquet writer class to handle options and write tables in chunks.
Definition: parquet.hpp:1868
~chunked_parquet_writer()
Default destructor. This is added to not leak detail API.
chunked_parquet_writer(chunked_parquet_writer_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream())
Constructor with chunked writer options.
std::unique_ptr< std::vector< uint8_t > > close(std::vector< std::string > const &column_chunks_file_path={})
Finishes the chunked/streamed write process.
std::unique_ptr< parquet::detail::writer > writer
Unique pointer to impl writer class.
Definition: parquet.hpp:1921
chunked_parquet_writer & write(table_view const &table, std::vector< partition_info > const &partitions={})
Writes table to output.
chunked_parquet_writer()
Default constructor; this should never be used. It is added only to satisfy Cython.
Builds parquet_reader_options to use for read_parquet().
Definition: parquet.hpp:541
parquet_reader_options_builder & num_bytes(size_t val)
Sets number of bytes after skipping to end reading row groups at.
Definition: parquet.hpp:741
parquet_reader_options_builder & use_arrow_schema(bool val)
Sets to enable/disable use of arrow schema to read.
Definition: parquet.hpp:650
parquet_reader_options_builder(source_info src)
Constructor from source info.
Definition: parquet.hpp:558
parquet_reader_options_builder & decimal_width(type_id width)
Sets the decimal width used to cast decimal columns.
Definition: parquet.hpp:766
parquet_reader_options_builder & skip_rows(int64_t val)
Sets number of rows to skip.
Definition: parquet.hpp:702
parquet_reader_options_builder & allow_mismatched_pq_schemas(bool val)
Sets to enable/disable reading of matching projected and filter columns from mismatched Parquet sourc...
Definition: parquet.hpp:665
parquet_reader_options_builder & column_names(std::vector< std::string > column_names)
Sets names of the columns to be read.
Definition: parquet.hpp:580
parquet_reader_options_builder & ignore_missing_columns(bool val)
Sets to enable/disable ignoring of non-existent projected columns while reading.
Definition: parquet.hpp:678
parquet_reader_options_builder & skip_bytes(size_t val)
Sets bytes to skip before starting reading row groups.
Definition: parquet.hpp:729
parquet_reader_options_builder & timestamp_type(data_type type)
Sets the timestamp_type used to cast timestamp columns.
Definition: parquet.hpp:753
parquet_reader_options_builder & use_pandas_metadata(bool val)
Sets to enable/disable use of pandas metadata to read.
Definition: parquet.hpp:638
parquet_reader_options_builder()=default
Default constructor.
parquet_reader_options_builder & num_rows(int64_t val)
Sets number of rows to read.
Definition: parquet.hpp:717
parquet_reader_options_builder & row_groups(std::vector< std::vector< size_type >> row_groups)
Sets vector of individual row groups to read.
Definition: parquet.hpp:604
parquet_reader_options_builder & set_column_schema(std::vector< reader_column_schema > val)
Sets reader metadata.
Definition: parquet.hpp:690
parquet_reader_options_builder & columns(std::vector< std::string > column_names)
Sets names of the columns to be read.
Definition: parquet.hpp:568
parquet_reader_options_builder & column_indices(std::vector< cudf::size_type > col_indices)
Sets the indices of top-level columns to be read from all input sources.
Definition: parquet.hpp:592
parquet_reader_options && build()
Moves the parquet_reader_options member once it is built.
Definition: parquet.hpp:811
parquet_reader_options_builder & filter(ast::expression const &filter)
Sets AST based filter for predicate pushdown.
Definition: parquet.hpp:614
parquet_reader_options_builder & case_sensitive_names(bool val)
Sets whether column name matching is case sensitive.
Definition: parquet.hpp:793
parquet_reader_options_builder & use_jit_filter(bool use_jit_filter)
Enable/disable use of JIT for filter step.
Definition: parquet.hpp:778
parquet_reader_options_builder & convert_strings_to_categories(bool val)
Sets enable/disable conversion of strings to categories.
Definition: parquet.hpp:626
Settings for read_parquet().
Definition: parquet.hpp:66
data_type get_timestamp_type() const
Returns timestamp type used to cast timestamp columns.
Definition: parquet.hpp:275
parquet_reader_options()=default
Default constructor.
void enable_allow_mismatched_pq_schemas(bool val)
Sets to enable/disable reading of matching projected and filter columns from mismatched Parquet sourc...
Definition: parquet.hpp:460
void set_skip_rows(int64_t val)
Sets number of rows to skip.
bool is_enabled_use_jit_filter() const
Returns whether to use JIT compilation for filtering.
Definition: parquet.hpp:289
size_t get_skip_bytes() const
Returns bytes to skip before starting reading row groups.
Definition: parquet.hpp:222
bool is_enabled_ignore_missing_columns() const
Returns boolean depending on whether to ignore non-existent projected columns while reading.
Definition: parquet.hpp:189
static parquet_reader_options_builder builder(source_info src=source_info{})
Creates a parquet_reader_options_builder to build parquet_reader_options. By default,...
void enable_convert_strings_to_categories(bool val)
Sets to enable/disable conversion of strings to categories.
Definition: parquet.hpp:437
std::optional< std::vector< reader_column_schema > > get_column_schema() const
Returns optional tree of metadata.
Definition: parquet.hpp:196
void set_skip_bytes(size_t val)
Sets bytes to skip before starting reading row groups.
type_id get_decimal_width() const
Returns decimal width used to cast decimal columns.
Definition: parquet.hpp:282
void set_column_indices(std::vector< cudf::size_type > col_indices)
Sets the indices of top-level columns to be read from all input sources.
Definition: parquet.hpp:374
source_info const & get_source() const
Returns source info.
Definition: parquet.hpp:144
auto const & get_column_indices() const
Returns indices of top-level columns to be read, if set.
Definition: parquet.hpp:254
auto const & get_row_groups() const
Returns list of individual row groups to be read.
Definition: parquet.hpp:261
void set_decimal_width(type_id width)
Sets decimal width used to cast decimal columns.
Definition: parquet.hpp:525
void set_row_groups(std::vector< std::vector< size_type >> row_groups)
Specifies which row groups to read from each input source.
void enable_ignore_missing_columns(bool val)
Sets to enable/disable ignoring of non-existent projected columns while reading.
Definition: parquet.hpp:468
void set_source(source_info src)
Set a new source location.
Definition: parquet.hpp:306
auto const & get_columns() const
Returns names of column to be read, if set.
Definition: parquet.hpp:237
void set_timestamp_type(data_type type)
Sets timestamp_type used to cast timestamp columns.
Definition: parquet.hpp:517
void set_column_names(std::vector< std::string > column_names)
Sets the names of columns to be read from all input sources.
Definition: parquet.hpp:356
std::optional< int64_t > const & get_num_rows() const
Returns number of rows to read.
Definition: parquet.hpp:214
bool is_enabled_convert_strings_to_categories() const
Returns boolean depending on whether strings should be converted to categories.
Definition: parquet.hpp:151
void set_columns(std::vector< std::string > column_names)
Sets the names of columns to be read from all input sources.
Definition: parquet.hpp:330
void set_num_rows(int64_t val)
Sets number of rows to read.
void enable_case_sensitive_names(bool val)
Sets whether column name matching is case sensitive.
Definition: parquet.hpp:535
void set_num_bytes(size_t val)
Sets number of bytes after skipping to end reading row groups at.
void enable_use_pandas_metadata(bool val)
Sets to enable/disable use of pandas metadata to read.
Definition: parquet.hpp:444
void enable_use_arrow_schema(bool val)
Sets to enable/disable use of arrow schema to read.
Definition: parquet.hpp:451
bool is_enabled_use_pandas_metadata() const
Returns boolean depending on whether to use pandas metadata while reading.
Definition: parquet.hpp:161
bool is_enabled_allow_mismatched_pq_schemas() const
Returns boolean depending on whether to read matching projected and filter columns from mismatched Pa...
Definition: parquet.hpp:177
void set_column_schema(std::vector< reader_column_schema > val)
Sets reader column schema.
Definition: parquet.hpp:476
bool is_enabled_use_arrow_schema() const
Returns boolean depending on whether to use arrow schema while reading.
Definition: parquet.hpp:168
void set_filter(ast::expression const &filter)
Sets AST based filter for predicate pushdown.
Definition: parquet.hpp:430
auto const & get_filter() const
Returns AST based filter for predicate pushdown.
Definition: parquet.hpp:268
std::optional< size_t > const & get_num_bytes() const
Returns number of bytes after skipping to end reading row groups at.
Definition: parquet.hpp:230
auto const & get_column_names() const
Returns names of column to be read, if set.
Definition: parquet.hpp:247
int64_t get_skip_rows() const
Returns number of rows to skip from the start.
Definition: parquet.hpp:206
bool is_enabled_case_sensitive_names() const
Returns whether column name matching is case sensitive.
Definition: parquet.hpp:299
Base settings for write_parquet() and chunked_parquet_writer.
Definition: parquet.hpp:1035
void enable_utc_timestamps(bool val)
Sets preference for writing timestamps as UTC. Write timestamps as UTC if set to true.
void enable_write_v2_headers(bool val)
Sets preference for V2 page headers. Write V2 page headers if set to true.
auto const & get_sorting_columns() const
Returns the sorting_columns.
Definition: parquet.hpp:1257
auto get_row_group_size_bytes() const
Returns maximum row group size, in bytes.
Definition: parquet.hpp:1160
bool is_enabled_int96_timestamps() const
Returns true if timestamps will be written as INT96.
Definition: parquet.hpp:1139
void set_metadata(table_input_metadata metadata)
Sets metadata.
void set_row_group_size_rows(size_type size_rows)
Sets the maximum row group size, in rows.
parquet_writer_options_base(sink_info sink)
Constructor from sink.
Definition: parquet.hpp:1085
void set_stats_level(statistics_freq sf)
Sets the level of statistics.
auto get_row_group_size_rows() const
Returns maximum row group size, in rows.
Definition: parquet.hpp:1167
parquet_writer_options_base()=default
Default constructor.
void set_max_page_size_bytes(size_t size_bytes)
Sets the maximum uncompressed page size, in bytes.
void set_sorting_columns(std::vector< sorting_column > sorting_columns)
Sets sorting columns.
auto is_enabled_write_arrow_schema() const
Returns true if arrow schema will be written.
Definition: parquet.hpp:1153
auto is_enabled_write_v2_headers() const
Returns true if V2 page headers should be written.
Definition: parquet.hpp:1239
void set_dictionary_policy(dictionary_policy policy)
Sets the policy for dictionary use.
auto get_max_page_size_bytes() const
Returns the maximum uncompressed page size, in bytes.
Definition: parquet.hpp:1176
void set_max_dictionary_size(size_t size_bytes)
Sets the maximum dictionary size, in bytes.
compression_type get_compression() const
Returns compression format used.
Definition: parquet.hpp:1107
auto get_max_dictionary_size() const
Returns maximum dictionary size, in bytes.
Definition: parquet.hpp:1215
void set_compression(compression_type compression)
Sets compression type.
dictionary_policy get_dictionary_policy() const
Returns policy for dictionary use.
Definition: parquet.hpp:1208
void set_compression_statistics(std::shared_ptr< writer_compression_statistics > comp_stats)
Sets the pointer to the output compression statistics.
std::shared_ptr< writer_compression_statistics > get_compression_statistics() const
Returns a shared pointer to the user-provided compression statistics.
Definition: parquet.hpp:1229
void set_max_page_size_rows(size_type size_rows)
Sets the maximum page size, in rows.
void enable_page_level_compression(bool val)
Sets preference for per-page compression decision in V2 pages.
auto get_max_page_fragment_size() const
Returns maximum page fragment size, in rows.
Definition: parquet.hpp:1222
void set_key_value_metadata(std::vector< std::map< std::string, std::string >> metadata)
Sets metadata.
void set_max_page_fragment_size(size_type size_rows)
Sets the maximum page fragment size, in rows.
void enable_write_arrow_schema(bool val)
Sets preference for writing arrow schema. Write arrow schema if set to true.
auto is_enabled_utc_timestamps() const
Returns true if timestamps will be written as UTC.
Definition: parquet.hpp:1146
void set_row_group_size_bytes(size_t size_bytes)
Sets the maximum row group size, in bytes.
auto is_enabled_page_level_compression() const
Returns true if per-page compression is enabled for V2 pages.
Definition: parquet.hpp:1250
void enable_int96_timestamps(bool req)
Sets timestamp writing preferences. INT96 timestamps will be written if true and TIMESTAMP_MICROS will be written if false.
statistics_freq get_stats_level() const
Returns level of statistics requested in output file.
Definition: parquet.hpp:1114
std::vector< std::map< std::string, std::string > > const & get_key_value_metadata() const
Returns Key-Value footer metadata information.
Definition: parquet.hpp:1128
auto const & get_metadata() const
Returns associated metadata.
Definition: parquet.hpp:1121
auto get_max_page_size_rows() const
Returns maximum page size, in rows.
Definition: parquet.hpp:1188
auto get_column_index_truncate_length() const
Returns maximum length of min or max values in column index, in bytes.
Definition: parquet.hpp:1198
void set_column_index_truncate_length(int32_t size_bytes)
Sets the maximum length of min or max values in column index, in bytes.
sink_info const & get_sink() const
Returns sink info.
Definition: parquet.hpp:1100
Base class for Parquet options builders.
Definition: parquet.hpp:1401
BuilderT & compression(compression_type compression)
Sets compression type.
BuilderT & key_value_metadata(std::vector< std::map< std::string, std::string >> metadata)
Sets Key-Value footer metadata.
OptionsT & get_options()
Return reference to the options object being built.
Definition: parquet.hpp:1410
BuilderT & utc_timestamps(bool enabled)
Set to true if timestamps are to be written as UTC.
BuilderT & max_dictionary_size(size_t val)
Sets the maximum dictionary size, in bytes.
BuilderT & max_page_size_bytes(size_t val)
Sets the maximum uncompressed page size, in bytes.
OptionsT && build()
move options member once it's built.
BuilderT & stats_level(statistics_freq sf)
Sets the level of statistics.
BuilderT & column_index_truncate_length(int32_t val)
Sets the desired maximum size in bytes for min and max values in the column index.
BuilderT & compression_statistics(std::shared_ptr< writer_compression_statistics > const &comp_stats)
Sets the pointer to the output compression statistics.
BuilderT & metadata(table_input_metadata metadata)
Sets metadata.
BuilderT & dictionary_policy(enum dictionary_policy val)
Sets the policy for dictionary use.
parquet_writer_options_builder_base(OptionsT options)
Constructor from options.
BuilderT & page_level_compression(bool enabled)
Set to true to enable per-page compression decisions for V2 pages.
BuilderT & int96_timestamps(bool enabled)
Sets whether int96 timestamps are written or not.
BuilderT & row_group_size_bytes(size_t val)
Sets the maximum row group size, in bytes.
BuilderT & sorting_columns(std::vector< sorting_column > sorting_columns)
Sets column sorting metadata.
BuilderT & write_arrow_schema(bool enabled)
Set to true if arrow schema is to be written.
parquet_writer_options_builder_base()=default
Default constructor.
BuilderT & write_v2_headers(bool enabled)
Set to true if V2 page headers are to be written.
BuilderT & max_page_fragment_size(size_type val)
Sets the maximum page fragment size, in rows.
BuilderT & row_group_size_rows(size_type val)
Sets the maximum number of rows in output row groups.
BuilderT & max_page_size_rows(size_type val)
Sets the maximum page size, in rows. Counts only top-level rows, ignoring any nesting.
Class to build parquet_writer_options.
Definition: parquet.hpp:1725
parquet_writer_options_builder(sink_info const &sink, table_view const &table)
Constructor from sink and table.
parquet_writer_options_builder()=default
Default constructor.
parquet_writer_options_builder & partitions(std::vector< partition_info > partitions)
Sets partitions in parquet_writer_options.
parquet_writer_options_builder & column_chunks_file_paths(std::vector< std::string > file_paths)
Sets column chunks file path to be set in the raw output metadata.
Settings for write_parquet().
Definition: parquet.hpp:1636
void set_partitions(std::vector< partition_info > partitions)
Sets partitions.
static parquet_writer_options_builder builder(sink_info const &sink, table_view const &table)
Create builder to create parquet_writer_options.
parquet_writer_options()=default
Default constructor.
std::vector< std::string > const & get_column_chunks_file_paths() const
Returns Column chunks file paths to be set in the raw output metadata.
Definition: parquet.hpp:1698
table_view get_table() const
Returns table_view.
Definition: parquet.hpp:1684
void set_column_chunks_file_paths(std::vector< std::string > file_paths)
Sets column chunks file path to be set in the raw output metadata.
static parquet_writer_options_builder builder()
Create builder to create parquet_writer_options.
std::vector< partition_info > const & get_partitions() const
Returns partitions.
Definition: parquet.hpp:1691
Metadata for a table.
Definition: io/types.hpp:893
A set of cudf::column_view's of the same size.
Definition: table_view.hpp:189
A set of cudf::column's of the same size.
Definition: table.hpp:29
rmm::cuda_stream_view const get_default_stream()
Get the current default stream.
table_with_metadata read_parquet(std::vector< std::unique_ptr< cudf::io::datasource >> &&sources, std::vector< parquet::FileMetaData > &&parquet_metadatas, parquet_reader_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=cudf::get_current_device_resource_ref())
Reads a Parquet dataset into a set of columns using pre-existing Parquet datasources and file metadata.
constexpr size_type default_row_group_size_rows
1 million rows per row group
Definition: parquet.hpp:32
constexpr int32_t default_column_index_truncate_length
truncate to 64 bytes
Definition: parquet.hpp:35
constexpr size_t default_row_group_size_bytes
Infinite bytes per row group.
Definition: parquet.hpp:30
bool is_supported_write_parquet(compression_type compression)
Check if the compression type is supported for writing Parquet files.
constexpr size_type default_max_page_fragment_size
5000 rows per page fragment
Definition: parquet.hpp:37
constexpr size_t default_max_dictionary_size
1MB dictionary size
Definition: parquet.hpp:36
bool is_supported_read_parquet(compression_type compression)
Check if the compression type is supported for reading Parquet files.
constexpr size_t default_max_page_size_bytes
512KB per page
Definition: parquet.hpp:33
constexpr size_type default_max_page_size_rows
20k rows per page
Definition: parquet.hpp:34
statistics_freq
Column statistics granularity type for parquet/orc writers.
Definition: io/types.hpp:85
dictionary_policy
Control use of dictionary encoding for parquet writer.
Definition: io/types.hpp:214
compression_type
Compression algorithms.
Definition: io/types.hpp:46
@ STATISTICS_ROWGROUP
Per-Rowgroup column statistics.
Definition: io/types.hpp:87
@ ADAPTIVE
Use dictionary when it will not impact compression.
Definition: io/types.hpp:216
std::unique_ptr< std::vector< uint8_t > > merge_row_group_metadata(std::vector< std::unique_ptr< std::vector< uint8_t >>> const &metadata_list)
Merges multiple raw metadata blobs that were previously created by write_parquet into a single metadata blob.
std::unique_ptr< std::vector< uint8_t > > write_parquet(parquet_writer_options const &options, rmm::cuda_stream_view stream=cudf::get_default_stream())
Writes a set of columns to parquet format.
rmm::device_async_resource_ref get_current_device_resource_ref()
Get the current device memory resource reference.
cuda::mr::resource_ref< cuda::mr::device_accessible > device_async_resource_ref
std::vector< std::unique_ptr< column > > filter(std::vector< column_view > const &predicate_columns, std::string const &predicate_udf, std::vector< column_view > const &filter_columns, bool is_ptx, std::optional< void * > user_data=std::nullopt, null_aware is_null_aware=null_aware::NO, output_nullability predicate_nullability=output_nullability::PRESERVE, rmm::cuda_stream_view stream=cudf::get_default_stream(), rmm::device_async_resource_ref mr=cudf::get_current_device_resource_ref())
Creates a new column by applying a filter function against every element of the input columns.
#define CUDF_EXPECTS(...)
Macro for checking (pre-)conditions that throws an exception when a condition is violated.
Definition: error.hpp:145
int32_t size_type
Row index type for columns and tables.
Definition: types.hpp:84
type_id
Identifies a column's logical element type.
Definition: types.hpp:192
cuDF-IO API type definitions
cuDF interfaces
Definition: host_udf.hpp:26
A generic expression that can be evaluated to return a value.
Definition: expressions.hpp:62
Destination information for write interfaces.
Definition: io/types.hpp:471
Struct used to describe column sorting metadata.
Definition: parquet.hpp:1026
Source information for read interfaces.
Definition: io/types.hpp:316
Table with table metadata used by io readers to return the metadata by value.
Definition: io/types.hpp:292
Class definitions for (mutable)_table_view
Type declarations for libcudf.