managed_memory_resource.hpp
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <rmm/mr/device/device_memory_resource.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>

#include <cstddef>

namespace rmm::mr {

/// device_memory_resource derived class that uses cudaMallocManaged/Free for
/// allocation/deallocation.
class managed_memory_resource final : public device_memory_resource {
 public:
  managed_memory_resource() = default;
  ~managed_memory_resource() override = default;
  managed_memory_resource(managed_memory_resource const&) = default;
  managed_memory_resource(managed_memory_resource&&) = default;
  managed_memory_resource& operator=(managed_memory_resource const&) = default;
  managed_memory_resource& operator=(managed_memory_resource&&) = default;

  /// Query whether the resource supports use of non-null streams for
  /// allocation/deallocation. This resource does not; it always returns false.
  [[nodiscard]] bool supports_streams() const noexcept override { return false; }

  /// Query whether the resource supports the get_mem_info API. Always returns true.
  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return true; }

 private:
  /// Allocate memory of size at least `bytes` with cudaMallocManaged.
  /// The stream argument is ignored.
  void* do_allocate(std::size_t bytes, cuda_stream_view) override
  {
    // FIXME: Unlike cudaMalloc, cudaMallocManaged will throw an error for 0
    // size allocations.
    if (bytes == 0) { return nullptr; }

    void* ptr{nullptr};
    RMM_CUDA_TRY_ALLOC(cudaMallocManaged(&ptr, bytes));
    return ptr;
  }

  /// Deallocate memory pointed to by `ptr` with cudaFree. The size and stream
  /// arguments are ignored.
  void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override
  {
    RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));
  }

  /// Compare this resource to another. Two managed_memory_resources always
  /// compare equal.
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    return dynamic_cast<managed_memory_resource const*>(&other) != nullptr;
  }

  /// Get free and total device memory via cudaMemGetInfo. The stream argument is unused.
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    cuda_stream_view stream) const override
  {
    std::size_t free_size{};
    std::size_t total_size{};
    RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    return std::make_pair(free_size, total_size);
  }
};

}  // namespace rmm::mr
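
For reference, a minimal usage sketch (not part of the header) follows. It assumes the broader RMM API, namely per_device_resource.hpp, rmm::cuda_stream_default, and the public device_memory_resource::allocate/deallocate interface; none of those names are defined in this file.

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

int main()
{
  rmm::mr::managed_memory_resource mr;

  // Optionally make managed (unified) memory the default resource for the current device.
  rmm::mr::set_current_device_resource(&mr);

  // Allocate and free 1 MiB of CUDA managed memory. The stream argument is
  // accepted but ignored by this resource (supports_streams() returns false).
  void* ptr = mr.allocate(1 << 20, rmm::cuda_stream_default);
  mr.deallocate(ptr, 1 << 20, rmm::cuda_stream_default);

  return 0;
}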