limiting_resource_adaptor.hpp
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION.
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include <rmm/aligned.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/detail/export.hpp>
#include <rmm/detail/format.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/resource_ref.hpp>

#include <atomic>
#include <cstddef>

namespace RMM_NAMESPACE {
namespace mr {
/**
 * @brief Resource that uses `Upstream` to allocate memory and limits the total allocations
 * possible.
 */
template <typename Upstream>
class limiting_resource_adaptor final : public device_memory_resource {
 public:
  /**
   * @brief Construct a new limiting resource adaptor using `upstream` to satisfy allocation
   * requests, limiting total allocations to `allocation_limit` bytes.
   */
  limiting_resource_adaptor(device_async_resource_ref upstream,
                            std::size_t allocation_limit,
                            std::size_t alignment = CUDA_ALLOCATION_ALIGNMENT)
    : upstream_{upstream},
      allocation_limit_{allocation_limit},
      allocated_bytes_(0),
      alignment_(alignment)
  {
  }

  /**
   * @brief Construct a new limiting resource adaptor from a pointer to the upstream resource,
   * limiting total allocations to `allocation_limit` bytes.
   */
  limiting_resource_adaptor(Upstream* upstream,
                            std::size_t allocation_limit,
                            std::size_t alignment = CUDA_ALLOCATION_ALIGNMENT)
    : upstream_{to_device_async_resource_ref_checked(upstream)},
      allocation_limit_{allocation_limit},
      allocated_bytes_(0),
      alignment_(alignment)
  {
  }

  limiting_resource_adaptor()                                                = delete;
  ~limiting_resource_adaptor() override                                      = default;
  limiting_resource_adaptor(limiting_resource_adaptor const&)                = delete;
  limiting_resource_adaptor(limiting_resource_adaptor&&) noexcept            = default;
  limiting_resource_adaptor& operator=(limiting_resource_adaptor const&)     = delete;
  limiting_resource_adaptor& operator=(limiting_resource_adaptor&&) noexcept = default;

  /**
   * @brief Get a `device_async_resource_ref` to the upstream resource.
   */
  [[nodiscard]] device_async_resource_ref get_upstream_resource() const noexcept
  {
    return upstream_;
  }

  /**
   * @brief Query the number of bytes that have been allocated. Note that this cannot be used to
   * determine how large an allocation is still possible.
   */
  [[nodiscard]] std::size_t get_allocated_bytes() const { return allocated_bytes_; }

  /**
   * @brief Query the maximum number of bytes this adaptor is allowed to allocate. This is the
   * limit on the adaptor, not a property of the underlying device.
   */
  [[nodiscard]] std::size_t get_allocation_limit() const { return allocation_limit_; }

 private:
  /**
   * @brief Allocate memory of size at least `bytes` from the upstream resource, throwing
   * `rmm::out_of_memory` if the request would exceed the configured limit.
   */
  void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
  {
    // Optimistically reserve the aligned size against the limit; roll back the reservation if
    // the limit would be exceeded or the upstream allocation fails.
    auto const proposed_size = align_up(bytes, alignment_);
    auto const old           = allocated_bytes_.fetch_add(proposed_size);
    if (old + proposed_size <= allocation_limit_) {
      try {
        return get_upstream_resource().allocate(stream, bytes);
      } catch (...) {
        allocated_bytes_ -= proposed_size;
        throw;
      }
    }

    allocated_bytes_ -= proposed_size;
    auto const msg = std::string("Exceeded memory limit (failed to allocate ") +
                     rmm::detail::format_bytes(bytes) + ")";
    RMM_FAIL(msg.c_str(), rmm::out_of_memory);
  }

  /**
   * @brief Free the allocation at `ptr` via the upstream resource and subtract its aligned size
   * from the tracked total.
   */
  void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) noexcept override
  {
    std::size_t allocated_size = align_up(bytes, alignment_);
    get_upstream_resource().deallocate(stream, ptr, bytes);
    allocated_bytes_ -= allocated_size;
  }

  /**
   * @brief Compare this resource to another; two limiting adaptors are equal if they are the
   * same object or wrap equal upstream resources.
   */
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    if (this == &other) { return true; }
    auto const* cast = dynamic_cast<limiting_resource_adaptor<Upstream> const*>(&other);
    if (cast == nullptr) { return false; }
    return get_upstream_resource() == cast->get_upstream_resource();
  }

  // The upstream resource used for satisfying allocation requests
  device_async_resource_ref upstream_;

  // Maximum bytes this allocator is allowed to allocate.
  std::size_t allocation_limit_;

  // Number of currently-allocated bytes
  std::atomic<std::size_t> allocated_bytes_;

  // todo: should be some way to ask the upstream...
  std::size_t alignment_;
};
// end of group
}  // namespace mr
}  // namespace RMM_NAMESPACE
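
A minimal usage sketch (not part of this header) follows, assuming a CUDA-capable device; the cuda_memory_resource upstream and the 32 MiB limit are illustrative choices. The adaptor forwards allocations to the upstream resource and throws rmm::out_of_memory once outstanding allocations would exceed the configured limit.

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>

int main()
{
  rmm::mr::cuda_memory_resource upstream;  // any device resource can serve as the upstream
  // Allow at most 32 MiB of outstanding allocations through this adaptor.
  rmm::mr::limiting_resource_adaptor<rmm::mr::cuda_memory_resource> limited{&upstream,
                                                                             32u << 20};
  auto stream = rmm::cuda_stream_view{};  // default stream

  void* ptr = limited.allocate(16u << 20, stream);  // OK: 16 MiB fits under the 32 MiB limit

  try {
    (void)limited.allocate(32u << 20, stream);  // 16 MiB outstanding + 32 MiB exceeds the limit
  } catch (rmm::out_of_memory const&) {
    // Expected: the adaptor refuses allocations beyond allocation_limit.
  }

  limited.deallocate(ptr, 16u << 20, stream);
  return 0;
}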