prefetch_resource_adaptor.hpp
/*
 * Copyright (c) 2024-2025, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <rmm/detail/export.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/prefetch.hpp>
#include <rmm/resource_ref.hpp>

#include <cstddef>

namespace RMM_NAMESPACE {
namespace mr {

/// Resource adaptor that prefetches all memory allocations to the current device.
template <typename Upstream>
class prefetch_resource_adaptor final : public device_memory_resource {
 public:
  /// Construct a new prefetch resource adaptor using `upstream` to satisfy allocation requests.
  prefetch_resource_adaptor(device_async_resource_ref upstream) : upstream_{upstream} {}

  /// Construct a new prefetch resource adaptor using `upstream` to satisfy allocation requests.
  /// Throws if `upstream` is null.
  prefetch_resource_adaptor(Upstream* upstream)
    : upstream_{to_device_async_resource_ref_checked(upstream)}
  {
  }

  prefetch_resource_adaptor()                                            = delete;
  ~prefetch_resource_adaptor() override                                  = default;
  prefetch_resource_adaptor(prefetch_resource_adaptor const&)            = delete;
  prefetch_resource_adaptor& operator=(prefetch_resource_adaptor const&) = delete;
  prefetch_resource_adaptor(prefetch_resource_adaptor&&) noexcept =
    default;  ///< Default move constructor
  prefetch_resource_adaptor& operator=(prefetch_resource_adaptor&&) noexcept =
    default;  ///< Default move assignment operator

  /// @brief Return the upstream resource to which allocation requests are forwarded.
  [[nodiscard]] rmm::device_async_resource_ref get_upstream_resource() const noexcept
  {
    return upstream_;
  }

 private:
  /// Allocate memory of size at least `bytes` from the upstream resource, then
  /// prefetch the allocation to the current device on `stream`.
  void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
  {
    void* ptr = get_upstream_resource().allocate_async(bytes, stream);
    rmm::prefetch(ptr, bytes, rmm::get_current_cuda_device(), stream);
    return ptr;
  }

  /// Free the allocation of size `bytes` pointed to by `ptr` using the upstream resource.
  void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) override
  {
    get_upstream_resource().deallocate_async(ptr, bytes, stream);
  }

  /// Two prefetch adaptors are equal if they wrap the same upstream resource.
  bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    if (this == &other) { return true; }
    auto cast = dynamic_cast<prefetch_resource_adaptor<Upstream> const*>(&other);
    if (cast == nullptr) { return false; }
    return get_upstream_resource() == cast->get_upstream_resource();
  }

  // the upstream resource used for satisfying allocation requests
  device_async_resource_ref upstream_;
};
// end of group
}  // namespace mr
}  // namespace RMM_NAMESPACE
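
Usage sketch (not part of the header above): a minimal example, assuming rmm::mr::managed_memory_resource as the upstream and the stream-taking allocate/deallocate overloads of device_memory_resource. Prefetching is only meaningful for migratable (managed/unified) memory, so the adaptor is typically layered over a managed-memory upstream.

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/prefetch_resource_adaptor.hpp>

#include <cstddef>

int main()
{
  // Upstream resource that allocates CUDA managed (unified) memory.
  rmm::mr::managed_memory_resource upstream;

  // Adaptor that prefetches every allocation to the current device.
  rmm::mr::prefetch_resource_adaptor<rmm::mr::managed_memory_resource> mr{&upstream};

  // 1 MiB allocation: memory comes from `upstream`, then is prefetched to the
  // current GPU on the default stream before the pointer is returned.
  constexpr std::size_t size{1u << 20};
  void* ptr = mr.allocate(size, rmm::cuda_stream_default);
  mr.deallocate(ptr, size, rmm::cuda_stream_default);
  return 0;
}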