8 #include "../condensed_hierarchy.cu"
14 #include <raft/core/device_mdspan.hpp>
15 #include <raft/label/classlabels.cuh>
16 #include <raft/linalg/matrix_vector_op.cuh>
17 #include <raft/linalg/norm.cuh>
18 #include <raft/sparse/convert/csr.cuh>
19 #include <raft/sparse/op/sort.cuh>
20 #include <raft/util/cudart_utils.hpp>
22 #include <rmm/device_uvector.hpp>
23 #include <rmm/exec_policy.hpp>
25 #include <cub/cub.cuh>
26 #include <cuda/functional>
27 #include <cuda/std/tuple>
28 #include <thrust/copy.h>
29 #include <thrust/execution_policy.h>
30 #include <thrust/for_each.h>
31 #include <thrust/iterator/zip_iterator.h>
32 #include <thrust/reduce.h>
33 #include <thrust/sort.h>
34 #include <thrust/transform.h>
35 #include <thrust/transform_reduce.h>
/**
 * Runs a CUB segmented-reduce primitive with automatic temporary-storage
 * management. The functor is invoked twice, following the standard CUB
 * two-pass protocol: first with a null buffer to query the required scratch
 * size, then again with the allocated buffer to perform the reduction.
 *
 * @tparam value_idx     integral offset type
 * @tparam value_t       value type being reduced
 * @tparam CUBReduceFunc callable wrapping a cub::DeviceSegmentedReduce entry
 *                       point (e.g. Sum / Min / Max)
 *
 * @param[in]  in              device array of input values
 * @param[out] out             device array receiving one result per segment
 * @param[in]  n_segments      number of segments
 * @param[in]  offsets         device array of n_segments + 1 segment offsets;
 *                             segment i spans [offsets[i], offsets[i+1])
 * @param[in]  stream          CUDA stream for allocation and reduction
 * @param[in]  cub_reduce_func the CUB reduction to run
 */
template <typename value_idx, typename value_t, typename CUBReduceFunc>
void cub_segmented_reduce(const value_t* in,
                          value_t* out,
                          int n_segments,
                          const value_idx* offsets,
                          cudaStream_t stream,
                          CUBReduceFunc cub_reduce_func)
{
  rmm::device_uvector<char> d_temp_storage(0, stream);
  size_t temp_storage_bytes = 0;
  // Pass 1: null storage pointer => CUB only writes temp_storage_bytes.
  cub_reduce_func(
    nullptr, temp_storage_bytes, in, out, n_segments, offsets, offsets + 1, stream);
  d_temp_storage.resize(temp_storage_bytes, stream);

  // Pass 2: perform the actual segmented reduction.
  cub_reduce_func(
    d_temp_storage.data(), temp_storage_bytes, in, out, n_segments, offsets, offsets + 1, stream);
}
/**
 * Extracts the cluster tree from a condensed hierarchy by keeping only the
 * edges whose child size is > 1 (i.e. dropping all leaf edges), then
 * re-indexing parents and children to a 0-based cluster numbering by
 * subtracting n_leaves.
 *
 * @tparam value_idx integral index type
 * @tparam value_t   floating-point lambda type
 *
 * @param[in] handle         raft handle (stream + thrust policy)
 * @param[in] condensed_tree input condensed hierarchy
 * @return a new CondensedHierarchy containing only the cluster-to-cluster
 *         edges, with 0-indexed cluster ids
 */
template <typename value_idx, typename value_t>
Common::CondensedHierarchy<value_idx, value_t> make_cluster_tree(
  const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& condensed_tree)
{
  auto stream        = handle.get_stream();
  auto thrust_policy = handle.get_thrust_policy();
  auto parents       = condensed_tree.get_parents();
  auto children      = condensed_tree.get_children();
  auto lambdas       = condensed_tree.get_lambdas();
  auto sizes         = condensed_tree.get_sizes();

  auto n_edges = condensed_tree.get_n_edges();

  // Count edges whose child is itself a cluster (size > 1).
  value_idx cluster_tree_edges = thrust::transform_reduce(
    thrust_policy,
    sizes,
    sizes + n_edges,
    cuda::proclaim_return_type<value_idx>(
      [=] __device__(value_idx a) -> value_idx { return static_cast<value_idx>(a > 1); }),
    static_cast<value_idx>(0),
    cuda::std::plus<value_idx>());

  // Remove leaves from the condensed tree.
  rmm::device_uvector<value_idx> cluster_parents(cluster_tree_edges, stream);
  rmm::device_uvector<value_idx> cluster_children(cluster_tree_edges, stream);
  rmm::device_uvector<value_t> cluster_lambdas(cluster_tree_edges, stream);
  rmm::device_uvector<value_idx> cluster_sizes(cluster_tree_edges, stream);

  auto in = thrust::make_zip_iterator(cuda::std::make_tuple(parents, children, lambdas, sizes));

  auto out = thrust::make_zip_iterator(cuda::std::make_tuple(
    cluster_parents.data(), cluster_children.data(), cluster_lambdas.data(), cluster_sizes.data()));

  // Keep tuple i iff sizes[i] > 1 (stencil-based copy_if).
  thrust::copy_if(thrust_policy,
                  in,
                  in + n_edges,
                  sizes,
                  out,
                  [=] __device__(value_idx a) { return a > 1; });

  auto n_leaves = condensed_tree.get_n_leaves();

  // Cluster ids in the condensed tree start at n_leaves; shift to 0-based.
  thrust::transform(thrust_policy,
                    cluster_parents.begin(),
                    cluster_parents.end(),
                    cluster_parents.begin(),
                    [n_leaves] __device__(value_idx a) { return a - n_leaves; });

  thrust::transform(thrust_policy,
                    cluster_children.begin(),
                    cluster_children.end(),
                    cluster_children.begin(),
                    [n_leaves] __device__(value_idx a) { return a - n_leaves; });

  return Common::CondensedHierarchy<value_idx, value_t>(handle,
                                                        n_leaves,
                                                        cluster_tree_edges,
                                                        std::move(cluster_parents),
                                                        std::move(cluster_children),
                                                        std::move(cluster_lambdas),
                                                        std::move(cluster_sizes));
}
/**
 * Builds a CSR indptr over the (pre-sorted) parent array of a condensed
 * hierarchy so that all edges sharing a parent can be addressed as one
 * segment. Parents are first shifted to 0-based cluster ids in place.
 *
 * @tparam value_idx integral index type
 * @tparam value_t   floating-point lambda type
 *
 * @param[in]     handle         raft handle (stream + thrust policy)
 * @param[in]     condensed_tree condensed hierarchy providing sizes/counts
 * @param[in,out] sorted_parents device array of parents sorted ascending;
 *                               re-indexed in place by subtracting n_leaves
 * @param[out]    indptr         device array of n_clusters + 1 CSR offsets
 */
template <typename value_idx, typename value_t>
void parent_csr(const raft::handle_t& handle,
                Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                value_idx* sorted_parents,
                value_idx* indptr)
{
  auto stream        = handle.get_stream();
  auto thrust_policy = handle.get_thrust_policy();

  auto n_clusters = condensed_tree.get_n_clusters();
  auto n_leaves   = condensed_tree.get_n_leaves();
  auto n_edges    = condensed_tree.get_n_edges();

  // 0-index the sorted parents (cluster ids start at n_leaves) so they can
  // serve directly as CSR row ids.
  auto index_op = [n_leaves] __device__(const auto& x) { return x - n_leaves; };
  thrust::transform(
    thrust_policy, sorted_parents, sorted_parents + n_edges, sorted_parents, index_op);

  raft::sparse::convert::sorted_coo_to_csr(sorted_parents, n_edges, indptr, n_clusters + 1, stream);
}
/**
 * Normalizes each row of a row-major m x n device matrix in place so that
 * every row sums to 1 (L1 normalization).
 *
 * NOTE(review): rows whose L1 norm is 0 divide by zero here — assumed not to
 * occur for the probability matrices this is applied to; confirm at callers.
 *
 * @tparam value_idx integral index type
 * @tparam value_t   floating-point value type
 *
 * @param[in,out] data   row-major m x n device matrix, normalized in place
 * @param[in]     n      number of columns
 * @param[in]     m      number of rows
 * @param[in]     stream CUDA stream to operate on
 */
template <typename value_idx, typename value_t>
void normalize(value_t* data, value_idx n, size_t m, cudaStream_t stream)
{
  rmm::device_uvector<value_t> sums(m, stream);

  // Compute the L1 norm (sum of absolute values) of each row.
  raft::linalg::rowNorm<raft::linalg::NormType::L1Norm, true, value_t, size_t>(
    sums.data(), data, (size_t)n, m, stream);

  // Divide every element by its row sum, broadcasting sums along rows.
  raft::linalg::matrixVectorOp<true, false>(
    data,
    const_cast<value_t*>(data),
    sums.data(),
    n,
    (value_idx)m,
    [] __device__(value_t mat_in, value_t vec_in) { return mat_in / vec_in; },
    stream);
}
/**
 * Applies a numerically-stabilized exponentiation to each row of a row-major
 * m x n device matrix in place: data[i][j] = exp(data[i][j] - max_abs_i),
 * where max_abs_i is the L-infinity norm of row i. Subtracting the per-row
 * maximum before exponentiating avoids overflow; a subsequent normalize()
 * over the rows completes the softmax.
 *
 * @tparam value_idx integral index type
 * @tparam value_t   floating-point value type
 *
 * @param[in]     handle raft handle (stream)
 * @param[in,out] data   row-major m x n device matrix, transformed in place
 * @param[in]     n      number of columns
 * @param[in]     m      number of rows
 */
template <typename value_idx, typename value_t>
void softmax(const raft::handle_t& handle, value_t* data, value_idx n, size_t m)
{
  rmm::device_uvector<value_t> linf_norm(m, handle.get_stream());

  auto data_const_view =
    raft::make_device_matrix_view<const value_t, value_idx, raft::row_major>(data, (int)m, n);
  auto data_view =
    raft::make_device_matrix_view<value_t, value_idx, raft::row_major>(data, (int)m, n);
  auto linf_norm_const_view =
    raft::make_device_vector_view<const value_t, value_idx>(linf_norm.data(), (int)m);
  auto linf_norm_view = raft::make_device_vector_view<value_t, value_idx>(linf_norm.data(), (int)m);

  // Per-row L-infinity norm used as the stabilization constant.
  raft::linalg::norm<raft::linalg::NormType::LinfNorm, raft::Apply::ALONG_ROWS>(
    handle, data_const_view, linf_norm_view);

  // Broadcast the row norms down the columns and exponentiate the difference.
  raft::linalg::matrix_vector_op<raft::Apply::ALONG_COLUMNS>(
    handle,
    data_const_view,
    linf_norm_const_view,
    data_view,
    [] __device__(value_t mat_in, value_t vec_in) { return exp(mat_in - vec_in); });
}
Definition: hdbscan.hpp:29
value_idx * get_sizes()
Definition: hdbscan.hpp:107
value_t * get_lambdas()
Definition: hdbscan.hpp:106
value_idx get_n_leaves() const
Definition: hdbscan.hpp:110
value_idx get_n_edges()
Definition: hdbscan.hpp:108
value_idx * get_children()
Definition: hdbscan.hpp:105
int get_n_clusters()
Definition: hdbscan.hpp:109
value_idx * get_parents()
Definition: hdbscan.hpp:104
Common::CondensedHierarchy< value_idx, value_t > make_cluster_tree(const raft::handle_t &handle, Common::CondensedHierarchy< value_idx, value_t > &condensed_tree)
Definition: utils.h:84
void softmax(const raft::handle_t &handle, value_t *data, value_idx n, size_t m)
Definition: utils.h:206
void normalize(value_t *data, value_idx n, size_t m, cudaStream_t stream)
Definition: utils.h:176
void cub_segmented_reduce(const value_t *in, value_t *out, int n_segments, const value_idx *offsets, cudaStream_t stream, CUBReduceFunc cub_reduce_func)
Definition: utils.h:58
void parent_csr(const raft::handle_t &handle, Common::CondensedHierarchy< value_idx, value_t > &condensed_tree, value_idx *sorted_parents, value_idx *indptr)
Definition: utils.h:153
void transform(const raft::handle_t &handle, const KMeansParams &params, const float *centroids, const float *X, int n_samples, int n_features, float *X_new)
Transform X to a cluster-distance space.
Definition: dbscan.hpp:18