RMM 23.12
RAPIDS Memory Manager
aligned_resource_adaptor.hpp
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/aligned.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>

#include <cstddef>
#include <mutex>
#include <optional>
#include <unordered_map>

namespace rmm::mr {

/**
 * @brief Resource that adapts `Upstream` memory resource to allocate memory in a specified
 * alignment size.
 */
template <typename Upstream>
class aligned_resource_adaptor final : public device_memory_resource {
 public:
  /**
   * @brief Construct an aligned resource adaptor using `upstream` to satisfy allocation
   * requests.
   */
  explicit aligned_resource_adaptor(Upstream* upstream,
                                    std::size_t alignment = rmm::detail::CUDA_ALLOCATION_ALIGNMENT,
                                    std::size_t alignment_threshold = default_alignment_threshold)
    : upstream_{upstream}, alignment_{alignment}, alignment_threshold_{alignment_threshold}
  {
    RMM_EXPECTS(nullptr != upstream, "Unexpected null upstream resource pointer.");
    RMM_EXPECTS(rmm::detail::is_supported_alignment(alignment),
                "Allocation alignment is not a power of 2.");
  }

  aligned_resource_adaptor()                                           = delete;
  ~aligned_resource_adaptor() override                                 = default;
  aligned_resource_adaptor(aligned_resource_adaptor const&)            = delete;
  aligned_resource_adaptor(aligned_resource_adaptor&&)                 = delete;
  aligned_resource_adaptor& operator=(aligned_resource_adaptor const&) = delete;
  aligned_resource_adaptor& operator=(aligned_resource_adaptor&&)      = delete;

  /**
   * @brief Get the upstream memory resource.
   */
  Upstream* get_upstream() const noexcept { return upstream_; }

  /**
   * @brief Query whether the resource supports use of non-null CUDA streams for
   * allocation/deallocation.
   */
  [[nodiscard]] bool supports_streams() const noexcept override
  {
    return upstream_->supports_streams();
  }

  /**
   * @brief Query whether the resource supports the get_mem_info API.
   */
  [[nodiscard]] bool supports_get_mem_info() const noexcept override
  {
    return upstream_->supports_get_mem_info();
  }

  /**
   * @brief The default alignment used by the adaptor.
   */
  static constexpr std::size_t default_alignment_threshold = 0;

 private:
  using lock_guard = std::lock_guard<std::mutex>;

  /**
   * @brief Allocates memory of size at least `bytes` using the upstream resource, aligned to
   * the requested alignment when the request meets the alignment threshold.
   */
  void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
  {
    if (alignment_ == rmm::detail::CUDA_ALLOCATION_ALIGNMENT || bytes < alignment_threshold_) {
      return upstream_->allocate(bytes, stream);
    }
    auto const size = upstream_allocation_size(bytes);
    void* pointer   = upstream_->allocate(size, stream);
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
    auto const address         = reinterpret_cast<std::size_t>(pointer);
    auto const aligned_address = rmm::detail::align_up(address, alignment_);
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast,performance-no-int-to-ptr)
    void* aligned_pointer = reinterpret_cast<void*>(aligned_address);
    if (pointer != aligned_pointer) {
      lock_guard lock(mtx_);
      pointers_.emplace(aligned_pointer, pointer);
    }
    return aligned_pointer;
  }

  /**
   * @brief Free the allocation of size `bytes` pointed to by `ptr`, mapping an aligned pointer
   * back to the original upstream pointer when necessary.
   */
  void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) override
  {
    if (alignment_ == rmm::detail::CUDA_ALLOCATION_ALIGNMENT || bytes < alignment_threshold_) {
      upstream_->deallocate(ptr, bytes, stream);
    } else {
      {
        lock_guard lock(mtx_);
        auto const iter = pointers_.find(ptr);
        if (iter != pointers_.end()) {
          ptr = iter->second;
          pointers_.erase(iter);
        }
      }
      upstream_->deallocate(ptr, upstream_allocation_size(bytes), stream);
    }
  }

  /**
   * @brief Compare this resource to another for equality.
   */
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    if (this == &other) { return true; }
    auto cast = dynamic_cast<aligned_resource_adaptor<Upstream> const*>(&other);
    return cast != nullptr && upstream_->is_equal(*cast->get_upstream()) &&
           alignment_ == cast->alignment_ && alignment_threshold_ == cast->alignment_threshold_;
  }

  /**
   * @brief Get free and available memory from the upstream resource.
   */
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    cuda_stream_view stream) const override
  {
    return upstream_->get_mem_info(stream);
  }

  /**
   * @brief Calculate the allocation size needed from upstream to account for alignments of both
   * the size and the base pointer.
   */
  std::size_t upstream_allocation_size(std::size_t bytes) const
  {
    auto const aligned_size = rmm::detail::align_up(bytes, alignment_);
    return aligned_size + alignment_ - rmm::detail::CUDA_ALLOCATION_ALIGNMENT;
  }

  Upstream* upstream_;  ///< The upstream resource used for satisfying allocation requests
  std::unordered_map<void*, void*> pointers_;  ///< Map of aligned pointers to upstream pointers
  std::size_t alignment_;                      ///< The size used for allocation alignment
  std::size_t alignment_threshold_;  ///< The size above which allocations should be aligned
  mutable std::mutex mtx_;           ///< Mutex for exclusive access to pointers_
};
/** @} */  // end of group
}  // namespace rmm::mr
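
For reference, a minimal usage sketch of this adaptor (not part of the header above). It assumes the standard RMM header layout, a CUDA-capable device, and uses rmm::mr::cuda_memory_resource as the upstream resource; the 4096-byte alignment and 1 MiB threshold are illustrative values only.

#include <rmm/mr/device/aligned_resource_adaptor.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

int main()
{
  // Upstream resource that performs the actual device allocations.
  rmm::mr::cuda_memory_resource upstream;

  // Requests of at least 1 MiB are returned on a 4096-byte boundary;
  // smaller requests pass through to the upstream resource unchanged.
  rmm::mr::aligned_resource_adaptor<rmm::mr::cuda_memory_resource> aligned_mr{
    &upstream, 4096, 1 << 20};

  // This request meets the threshold, so the returned pointer is 4096-byte aligned.
  void* ptr = aligned_mr.allocate(1 << 20);
  aligned_mr.deallocate(ptr, 1 << 20);
  return 0;
}

Internally, a request at or above the threshold is padded by alignment - CUDA_ALLOCATION_ALIGNMENT extra bytes (see upstream_allocation_size) so that an aligned address always fits inside the upstream allocation; when the aligned pointer differs from the upstream pointer, the pair is recorded in pointers_ and the original pointer is recovered in do_deallocate.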