gpu.hpp File Reference
#include <cuml/experimental/fil/detail/forest.hpp>
#include <cuml/experimental/fil/detail/index_type.hpp>
#include <cuml/experimental/fil/detail/postprocessor.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_id.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
#include <cuml/experimental/fil/infer_kind.hpp>
#include <cstddef>
#include <optional>
Include dependency graph for gpu.hpp:


Namespaces

 ML
 
 ML::experimental
 
 ML::experimental::fil
 
 ML::experimental::fil::detail
 
 ML::experimental::fil::detail::inference
 

Functions

template<raft_proto::device_type D, bool has_categorical_nodes, typename forest_t, typename vector_output_t = std::nullptr_t, typename categorical_data_t = std::nullptr_t>
std::enable_if_t< D == raft_proto::device_type::gpu, void > ML::experimental::fil::detail::inference::infer (
    forest_t const &forest,
    postprocessor< typename forest_t::io_type > const &postproc,
    typename forest_t::io_type *output,
    typename forest_t::io_type *input,
    index_type row_count,
    index_type col_count,
    index_type class_count,
    vector_output_t vector_output = nullptr,
    categorical_data_t categorical_data = nullptr,
    infer_kind infer_type = infer_kind::default_kind,
    std::optional< index_type > specified_chunk_size = std::nullopt,
    raft_proto::device_id< D > device = raft_proto::device_id< D >{},
    raft_proto::cuda_stream stream = raft_proto::cuda_stream{})
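
A minimal call sketch, assuming a concrete forest_t, its matching postprocessor, and device-resident input/output buffers are already available. The helper run_gpu_inference and the buffer names below are illustrative only, not part of this header; the namespace qualifications (top-level raft_proto, index_type and infer_kind in ML::experimental::fil) follow the signature above and the headers included by this file. The sketch forwards to infer() for the common case of a forest with no categorical nodes and no vector leaves, leaving the chunk size and device id at their defaults.

    #include <cuml/experimental/fil/detail/index_type.hpp>
    #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
    #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp>
    #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
    #include <cuml/experimental/fil/infer_kind.hpp>
    #include <optional>
    // ...plus this header (gpu.hpp) itself, so that infer() is declared.

    namespace fil = ML::experimental::fil;

    // Illustrative wrapper (hypothetical, not part of the documented API).
    // postprocessor_t is expected to be postprocessor<typename forest_t::io_type>
    // as declared in detail/postprocessor.hpp.
    template <typename forest_t, typename postprocessor_t>
    void run_gpu_inference(forest_t const& forest,
                           postprocessor_t const& postproc,
                           typename forest_t::io_type* d_output,  // device output buffer (size depends on class_count and infer_type)
                           typename forest_t::io_type* d_input,   // device input buffer holding row_count * col_count values
                           fil::index_type row_count,
                           fil::index_type col_count,
                           fil::index_type class_count,
                           raft_proto::cuda_stream stream)
    {
      fil::detail::inference::infer<raft_proto::device_type::gpu,
                                    false,  // has_categorical_nodes
                                    forest_t>(
        forest,
        postproc,
        d_output,
        d_input,
        row_count,
        col_count,
        class_count,
        nullptr,                         // vector_output: no vector leaves
        nullptr,                         // categorical_data: no categorical nodes
        fil::infer_kind::default_kind,   // standard prediction output
        std::nullopt,                    // let the backend choose the chunk size
        raft_proto::device_id<raft_proto::device_type::gpu>{},  // default device
        stream);
    }

Since the call takes a raft_proto::cuda_stream, callers would typically synchronize that stream before reading d_output back on the host.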