pool_memory_resource.hpp
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2026, NVIDIA CORPORATION.
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include <rmm/aligned.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/detail/export.hpp>
#include <rmm/detail/format.hpp>
#include <rmm/detail/logging_assert.hpp>
#include <rmm/logger.hpp>
#include <rmm/mr/detail/coalescing_free_list.hpp>
#include <rmm/mr/detail/stream_ordered_memory_resource.hpp>
#include <rmm/resource_ref.hpp>

#include <cuda/std/type_traits>
#include <cuda_runtime_api.h>

#include <algorithm>
#include <cstddef>
#include <limits>
#include <mutex>
#include <optional>
#include <set>
namespace RMM_NAMESPACE {
namespace mr {

namespace detail {

/// @brief Helper to remove the `device_accessible` property when the upstream resource does not
/// provide it. Primary template: nothing is removed.
template <class PoolResource, class Upstream, class Property, class = void>
struct maybe_remove_property {};

/// @brief Specialization for upstreams lacking `Property`: the deleted hidden friend makes
/// `get_property(PoolResource, Property)` ill-formed, so the property is not forwarded.
template <class PoolResource, class Upstream, class Property>
struct maybe_remove_property<PoolResource,
                             Upstream,
                             Property,
                             cuda::std::enable_if_t<!cuda::has_property<Upstream, Property>>> {
#if defined(__GNUC__) && !defined(__clang__)  // GCC warns about compatibility
                                              // issues with pre ISO C++ code
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-template-friend"
#endif  // __GNUC__ and not __clang__
  friend void get_property(const PoolResource&, Property) = delete;
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif  // __GNUC__ and not __clang__
};

}  // namespace detail
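// Illustration (a sketch, not part of the original header; `HostOnlyResource` stands for a
// hypothetical upstream type that does not provide the `device_accessible` property):
//
//   static_assert(!cuda::has_property<rmm::mr::pool_memory_resource<HostOnlyResource>,
//                                     cuda::mr::device_accessible>);
//
// With a device-accessible upstream such as `rmm::mr::cuda_memory_resource`, the same check is
// true, because `cuda::forward_property` forwards the upstream's properties to the pool.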

/// @brief A coalescing best-fit suballocator which uses a pool of memory allocated from an
/// upstream memory_resource.
template <typename Upstream>
class pool_memory_resource final
  : public detail::
      maybe_remove_property<pool_memory_resource<Upstream>, Upstream, cuda::mr::device_accessible>,
    public detail::stream_ordered_memory_resource<pool_memory_resource<Upstream>,
                                                  detail::coalescing_free_list>,
    public cuda::forward_property<pool_memory_resource<Upstream>, Upstream> {
 public:
  friend class detail::stream_ordered_memory_resource<pool_memory_resource<Upstream>,
                                                      detail::coalescing_free_list>;
  /// @brief Construct a `pool_memory_resource` and allocate the initial device memory pool
  /// using `upstream_mr`. Both pool sizes must be multiples of 256 bytes.
  explicit pool_memory_resource(device_async_resource_ref upstream_mr,
                                std::size_t initial_pool_size,
                                std::optional<std::size_t> maximum_pool_size = std::nullopt)
    : upstream_mr_{upstream_mr}
  {
    RMM_EXPECTS(rmm::is_aligned(initial_pool_size, rmm::CUDA_ALLOCATION_ALIGNMENT),
                "Error, Initial pool size required to be a multiple of 256 bytes");
    RMM_EXPECTS(rmm::is_aligned(maximum_pool_size.value_or(0), rmm::CUDA_ALLOCATION_ALIGNMENT),
                "Error, Maximum pool size required to be a multiple of 256 bytes");

    initialize_pool(initial_pool_size, maximum_pool_size);
  }

  /// @brief Construct a `pool_memory_resource` from a non-null `Upstream*` and allocate the
  /// initial device memory pool using `upstream_mr`.
  explicit pool_memory_resource(Upstream* upstream_mr,
                                std::size_t initial_pool_size,
                                std::optional<std::size_t> maximum_pool_size = std::nullopt)
    : upstream_mr_{to_device_async_resource_ref_checked(upstream_mr)}
  {
    RMM_EXPECTS(rmm::is_aligned(initial_pool_size, rmm::CUDA_ALLOCATION_ALIGNMENT),
                "Error, Initial pool size required to be a multiple of 256 bytes");
    RMM_EXPECTS(rmm::is_aligned(maximum_pool_size.value_or(0), rmm::CUDA_ALLOCATION_ALIGNMENT),
                "Error, Maximum pool size required to be a multiple of 256 bytes");

    initialize_pool(initial_pool_size, maximum_pool_size);
  }

  /// @brief Construct a `pool_memory_resource` from an upstream reference and allocate the
  /// initial device memory pool using `upstream_mr`.
  template <typename Upstream2 = Upstream>
  explicit pool_memory_resource(Upstream2& upstream_mr,
                                std::size_t initial_pool_size,
                                std::optional<std::size_t> maximum_pool_size = std::nullopt)
    : pool_memory_resource(cuda::std::addressof(upstream_mr), initial_pool_size, maximum_pool_size)
  {
  }

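  // Usage sketch (illustrative only, not part of this header). It assumes the usual RMM headers
  // <rmm/mr/cuda_memory_resource.hpp> and <rmm/mr/pool_memory_resource.hpp>; the exact
  // allocate()/deallocate() overloads available depend on the RMM version in use.
  //
  //   rmm::mr::cuda_memory_resource cuda_mr;  // upstream resource (cudaMalloc/cudaFree)
  //   rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{
  //     &cuda_mr,
  //     std::size_t{1} << 28,                 // 256 MiB initial pool (a multiple of 256 bytes)
  //     std::size_t{1} << 30};                // optional maximum pool size of 1 GiB
  //
  //   void* ptr = pool_mr.allocate(4096);     // suballocated from the pool, not from cudaMalloc
  //   pool_mr.deallocate(ptr, 4096);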
  /// @brief Destroy the `pool_memory_resource` and deallocate all memory it allocated using
  /// the upstream resource.
  ~pool_memory_resource() override { release(); }

  pool_memory_resource()                                        = delete;
  pool_memory_resource(pool_memory_resource const&)             = delete;
  pool_memory_resource(pool_memory_resource&&)                  = delete;
  pool_memory_resource& operator=(pool_memory_resource const&)  = delete;
  pool_memory_resource& operator=(pool_memory_resource&&)       = delete;

  /// @brief `rmm::device_async_resource_ref` to the upstream resource.
  [[nodiscard]] device_async_resource_ref get_upstream_resource() const noexcept
  {
    return upstream_mr_;
  }

  /// @brief Returns the size of the current pool.
  [[nodiscard]] std::size_t pool_size() const noexcept { return current_pool_size_; }

 protected:
  /// The free list implementation.
  using free_list = detail::coalescing_free_list;
  /// The type of block returned by the free list.
  using block_type = free_list::block_type;
  using typename detail::stream_ordered_memory_resource<pool_memory_resource<Upstream>,
                                                        detail::coalescing_free_list>::split_block;
  /// Type of lock used to synchronize access.
  using lock_guard = std::lock_guard<std::mutex>;

  /// @brief Get the maximum size of allocations supported by this memory resource.
  [[nodiscard]] std::size_t get_maximum_allocation_size() const
  {
    return std::numeric_limits<std::size_t>::max();
  }

  /// @brief Try to expand the pool by allocating a block of at least `min_size` bytes from
  /// upstream, starting at `try_size` and halving the attempted size after each failure.
  /// @throws rmm::out_of_memory if even `min_size` cannot be allocated.
  block_type try_to_expand(std::size_t try_size, std::size_t min_size, cuda_stream_view stream)
  {
    auto report_error = [&](const char* reason) {
      RMM_LOG_ERROR("[A][Stream %s][Upstream %zuB][FAILURE maximum pool size exceeded: %s]",
                    rmm::detail::format_stream(stream),
                    min_size,
                    reason);
      auto const msg = std::string("Maximum pool size exceeded (failed to allocate ") +
                       rmm::detail::format_bytes(min_size) + std::string("): ") + reason;
      RMM_FAIL(msg.c_str(), rmm::out_of_memory);
    };

    while (try_size >= min_size) {
      try {
        auto block = block_from_upstream(try_size, stream);
        current_pool_size_ += block.size();
        return block;
      } catch (std::exception const& e) {
        if (try_size == min_size) { report_error(e.what()); }
      }
      try_size = std::max(min_size, try_size / 2);
    }

    auto const max_size = maximum_pool_size_.value_or(std::numeric_limits<std::size_t>::max());
    auto const msg      = std::string("Not enough room to grow, current/max/try size = ") +
                          rmm::detail::format_bytes(pool_size()) + ", " +
                          rmm::detail::format_bytes(max_size) + ", " +
                          rmm::detail::format_bytes(min_size);
    report_error(msg.c_str());
    return {};
  }
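  // Worked example (illustrative): called with try_size = 1 GiB and min_size = 64 MiB, a failing
  // upstream allocation is retried at 512 MiB, 256 MiB, 128 MiB and finally 64 MiB; the first
  // size that succeeds is added to current_pool_size_ and returned as a single block. If even
  // min_size fails, report_error() throws rmm::out_of_memory.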

  /// @brief Allocate initial memory for the pool.
  /// @throws rmm::logic_error if `initial_size` exceeds `maximum_size`.
  void initialize_pool(std::size_t initial_size, std::optional<std::size_t> maximum_size)
  {
    current_pool_size_ = 0;  // try_to_expand will set this if it succeeds
    maximum_pool_size_ = maximum_size;

    RMM_EXPECTS(
      initial_size <= maximum_pool_size_.value_or(std::numeric_limits<std::size_t>::max()),
      "Initial pool size exceeds the maximum pool size!");

    if (initial_size > 0) {
      auto const block = try_to_expand(initial_size, initial_size, cuda_stream_legacy);
      this->insert_block(block, cuda_stream_legacy);
    }
  }

  /// @brief Allocate space from upstream to supply the suballocation pool and return a
  /// sufficiently sized block (at least `size` bytes).
  block_type expand_pool(std::size_t size,
                         [[maybe_unused]] free_list& blocks,
                         cuda_stream_view stream)
  {
    // Strategy: If maximum_pool_size_ is set, then grow geometrically, e.g. by halfway to the
    // limit each time. If it is not set, grow exponentially, e.g. by doubling the pool size
    // each time. Upon failure, attempt to back off exponentially, e.g. by half the attempted
    // size, until either success or the attempt is less than the requested size.
    return try_to_expand(size_to_grow(size), size, stream);
  }

  /// @brief Given a minimum size, computes an appropriate size to grow the pool.
  /// Returns 0 if the requested size does not fit within the remaining maximum pool size.
  [[nodiscard]] std::size_t size_to_grow(std::size_t size) const
  {
    if (maximum_pool_size_.has_value()) {
      auto const unaligned_remaining = maximum_pool_size_.value() - pool_size();
      auto const remaining    = rmm::align_up(unaligned_remaining, rmm::CUDA_ALLOCATION_ALIGNMENT);
      auto const aligned_size = rmm::align_up(size, rmm::CUDA_ALLOCATION_ALIGNMENT);
      return (aligned_size <= remaining) ? std::max(aligned_size, remaining / 2) : 0;
    }
    return std::max(size, pool_size());
  }
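  // Worked example (illustrative): with maximum_pool_size_ = 4 GiB and pool_size() = 1 GiB, the
  // remaining headroom is 3 GiB, so a 256 MiB request grows the pool by
  // max(256 MiB, 3 GiB / 2) = 1.5 GiB, while a request larger than 3 GiB returns 0. With no
  // maximum set, the pool at least doubles: it grows by max(size, pool_size()).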

  /// @brief Allocate a block of `size` bytes from upstream to expand the suballocation pool.
  block_type block_from_upstream(std::size_t size, cuda_stream_view stream)
  {
    RMM_LOG_DEBUG("[A][Stream %s][Upstream %zuB]", rmm::detail::format_stream(stream), size);

    if (size == 0) { return {}; }

    void* ptr = get_upstream_resource().allocate(stream, size);
    return *upstream_blocks_.emplace(static_cast<char*>(ptr), size, true).first;
  }

  /// @brief Splits `block` if necessary and returns a pointer to memory of `size` bytes,
  /// together with the unused remainder (if any).
  split_block allocate_from_block(block_type const& block, std::size_t size)
  {
    block_type const alloc{block.pointer(), size, block.is_head()};
#ifdef RMM_POOL_TRACK_ALLOCATIONS
    allocated_blocks_.insert(alloc);
#endif

    auto rest = (block.size() > size)
                  // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                  ? block_type{block.pointer() + size, block.size() - size, false}
                  : block_type{};
    return {alloc, rest};
  }
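  // Example (illustrative): splitting a 1 MiB free block to serve a 256 KiB request yields
  // alloc = {block.pointer(), 256 KiB, block.is_head()} and
  // rest  = {block.pointer() + 256 KiB, 768 KiB, /*is_head=*/false}; the remainder is what the
  // base stream_ordered_memory_resource keeps on the stream's free list.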

  /// @brief Finds, frees and returns the block associated with pointer `ptr`.
  block_type free_block(void* ptr, std::size_t size) noexcept
  {
#ifdef RMM_POOL_TRACK_ALLOCATIONS
    if (ptr == nullptr) return block_type{};
    auto const iter = allocated_blocks_.find(static_cast<char*>(ptr));
    RMM_LOGGING_ASSERT(iter != allocated_blocks_.end());

    auto block = *iter;
    RMM_LOGGING_ASSERT(block.size() == rmm::align_up(size, allocation_alignment));
    allocated_blocks_.erase(iter);

    return block;
#else
    auto const iter = upstream_blocks_.find(static_cast<char*>(ptr));
    return block_type{static_cast<char*>(ptr), size, (iter != upstream_blocks_.end())};
#endif
  }

  /// @brief Free all memory allocated from the upstream memory_resource.
  void release()
  {
    lock_guard lock(this->get_mutex());

    for (auto block : upstream_blocks_) {
      get_upstream_resource().deallocate_sync(block.pointer(), block.size());
    }
    upstream_blocks_.clear();
#ifdef RMM_POOL_TRACK_ALLOCATIONS
    allocated_blocks_.clear();
#endif

    current_pool_size_ = 0;
  }

#ifdef RMM_DEBUG_PRINT
  /// @brief Print the pool's state (GPU memory, upstream blocks, free blocks) for debugging.
  void print()
  {
    lock_guard lock(this->get_mutex());

    auto const [free, total] = rmm::available_device_memory();
    std::cout << "GPU free memory: " << free << " total: " << total << "\n";

    std::cout << "upstream_blocks: " << upstream_blocks_.size() << "\n";
    std::size_t upstream_total{0};

    for (auto blocks : upstream_blocks_) {
      blocks.print();
      upstream_total += blocks.size();
    }
    std::cout << "total upstream: " << upstream_total << " B\n";

#ifdef RMM_POOL_TRACK_ALLOCATIONS
    std::cout << "allocated_blocks: " << allocated_blocks_.size() << "\n";
    for (auto block : allocated_blocks_)
      block.print();
#endif

    this->print_free_blocks();
  }
#endif

  /// @brief Get the largest available block size and total free size in the specified free list.
  std::pair<std::size_t, std::size_t> free_list_summary(free_list const& blocks)
  {
    std::size_t largest{};
    std::size_t total{};
    std::for_each(blocks.cbegin(), blocks.cend(), [&largest, &total](auto const& block) {
      total += block.size();
      largest = std::max(largest, block.size());
    });
    return {largest, total};
  }

 private:
  // The "heap" to allocate the pool from
  device_async_resource_ref upstream_mr_;
  std::size_t current_pool_size_{};
  std::optional<std::size_t> maximum_pool_size_{};

#ifdef RMM_POOL_TRACK_ALLOCATIONS
  std::set<block_type, rmm::mr::detail::compare_blocks<block_type>> allocated_blocks_;
#endif

  // blocks allocated from upstream
  std::set<block_type, rmm::mr::detail::compare_blocks<block_type>> upstream_blocks_;
};  // class pool_memory_resource

/** @} */  // end of group

}  // namespace mr
}  // namespace RMM_NAMESPACE