cuda_memory_resource.hpp
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "device_memory_resource.hpp"

#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>

#include <cstddef>
#include <utility>

namespace rmm {
namespace mr {
/**
 * @brief `device_memory_resource` derived class that uses cudaMalloc/Free for
 * allocation/deallocation.
 */
class cuda_memory_resource final : public device_memory_resource {
 public:
  cuda_memory_resource()                            = default;
  ~cuda_memory_resource()                           = default;
  cuda_memory_resource(cuda_memory_resource const&) = default;
  cuda_memory_resource(cuda_memory_resource&&)      = default;
  cuda_memory_resource& operator=(cuda_memory_resource const&) = default;
  cuda_memory_resource& operator=(cuda_memory_resource&&) = default;

  /**
   * @brief Query whether the resource supports use of non-null CUDA streams for
   * allocation/deallocation. This resource does not, so this always returns false.
   */
  bool supports_streams() const noexcept override { return false; }

  /**
   * @brief Query whether the resource supports the get_mem_info API.
   */
  bool supports_get_mem_info() const noexcept override { return true; }

 private:
  /**
   * @brief Allocates memory of size at least `bytes` using cudaMalloc.
   *
   * The stream argument is ignored. Throws `rmm::bad_alloc` if the allocation fails.
   */
  void* do_allocate(std::size_t bytes, cuda_stream_view) override
  {
    void* p{nullptr};
    RMM_CUDA_TRY(cudaMalloc(&p, bytes), rmm::bad_alloc);
    return p;
  }

  /**
   * @brief Deallocates memory pointed to by `p` using cudaFree.
   *
   * The size and stream arguments are ignored.
   */
  void do_deallocate(void* p, std::size_t, cuda_stream_view) override
  {
    RMM_ASSERT_CUDA_SUCCESS(cudaFree(p));
  }

  /**
   * @brief Compare this resource to another.
   *
   * Two `cuda_memory_resource`s always compare equal, because memory allocated by
   * one can be deallocated by any other.
   */
  bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    return dynamic_cast<cuda_memory_resource const*>(&other) != nullptr;
  }

  /**
   * @brief Get free and total device memory via cudaMemGetInfo.
   *
   * @return std::pair containing the free and total memory sizes in bytes
   */
  std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
  {
    std::size_t free_size;
    std::size_t total_size;
    RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    return std::make_pair(free_size, total_size);
  }
};
}  // namespace mr
}  // namespace rmm
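
The resource is normally used through the public allocate/deallocate/get_mem_info/is_equal entry points it inherits from device_memory_resource, which forward to the private do_* overrides above. The sketch below illustrates that usage; it is not part of this header, and it assumes the usual RMM include path rmm/mr/device/cuda_memory_resource.hpp and a CUDA-capable device.

#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

#include <cstddef>
#include <iostream>

int main()
{
  rmm::mr::cuda_memory_resource mr;

  // Free and total device memory, backed by cudaMemGetInfo.
  auto const mem_info = mr.get_mem_info(rmm::cuda_stream_view{});
  std::cout << mem_info.first << " bytes free of " << mem_info.second << "\n";

  // Allocate and free 1 MiB. The stream argument is accepted but ignored,
  // since supports_streams() is false for this resource.
  std::size_t const size{1u << 20};
  void* p = mr.allocate(size, rmm::cuda_stream_view{});
  mr.deallocate(p, size, rmm::cuda_stream_view{});

  // Any two cuda_memory_resource instances compare equal.
  rmm::mr::cuda_memory_resource other;
  std::cout << std::boolalpha << mr.is_equal(other) << "\n";  // prints true

  return 0;
}

Because every allocation and deallocation goes straight to cudaMalloc/cudaFree, this is the simplest device resource; resources that need faster or stream-ordered allocation (such as a pool resource layered on an upstream resource) are provided elsewhere in rmm::mr.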