/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/mm.h>

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_api.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global state
 */
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

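/* Root directory for all TTM related debugfs entries. */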
struct dentry *ttm_debugfs_root;

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	si_meminfo(&si);

	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root))
		ttm_debugfs_root = NULL;

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* But for DMA32 we limit ourselves to 2GiB maximum. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
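	/*
	 * Worked example: with 16 GiB of RAM and 4 KiB pages this yields
	 * num_pages = 4194304 / 2 = 2097152 pages (8 GiB), while num_dma32
	 * is clamped to 2 GiB, i.e. 524288 pages.
	 */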

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

/*
 * A buffer object shrink method that tries to swap out the first
 * swappable buffer object on the LRU lists of all registered devices.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);

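/*
 * Example (sketch): a caller under memory pressure would typically loop
 * until enough pages have been freed, similar to what the TT page
 * accounting in ttm_tt.c does when its global limits are exceeded; the
 * limit check below is illustrative only, not a TTM function:
 *
 *	while (over_the_global_page_limit()) {
 *		ret = ttm_global_swapout(ctx, GFP_KERNEL);
 *		if (ret == 0)
 *			break;
 *		if (ret < 0)
 *			return ret;
 *	}
 *
 * A positive return value is the number of pages swapped out of the
 * chosen device, 0 means nothing more could be swapped out.
 */

/**
 * ttm_device_swapout - try to swap out a buffer object of a device
 *
 * @bdev: The device whose LRU lists should be scanned.
 * @ctx: Operation context for the swapout.
 * @gfp_flags: Allocation flags passed on to the swapout code.
 *
 * Walks the TT-backed resource managers of @bdev in priority order and
 * swaps out the first buffer object that is swappable.
 *
 * Returns: the number of pages swapped out on success, 0 if nothing
 * could be swapped out, or a negative error code on failure.
 */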
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	unsigned i, j;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			list_for_each_entry(bo, &man->lru[j], lru) {
				uint32_t num_pages = PFN_UP(bo->base.size);

				ret = ttm_bo_swapout(bo, ctx, gfp_flags);
				/* ttm_bo_swapout has dropped the lru_lock */
				if (!ret)
					return num_pages;
				if (ret != -EBUSY)
					return ret;
			}
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);

static void ttm_device_delayed_workqueue(struct work_struct *work)
{
	struct ttm_device *bdev =
		container_of(work, struct ttm_device, wq.work);

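	/*
	 * Re-arm roughly every 10ms (at least one jiffy) as long as
	 * ttm_bo_delayed_delete() reports that entries remain on the
	 * delayed destroy list.
	 */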
	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for the buffer objects of this device.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device.
 *
 * Returns:
 * 0 on success, a negative error code otherwise.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);

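/*
 * Example (sketch): a driver typically embeds the ttm_device in its own
 * device structure and wires it up to the DRM core objects; the names
 * my_driver and my_bo_funcs below are illustrative, not part of TTM:
 *
 *	struct my_driver {
 *		struct drm_device drm;
 *		struct ttm_device bdev;
 *	};
 *
 *	ret = ttm_device_init(&mydrv->bdev, &my_bo_funcs, mydrv->drm.dev,
 *			      mydrv->drm.anon_inode->i_mapping,
 *			      mydrv->drm.vma_offset_manager, true,
 *			      dma_addressing_limited(mydrv->drm.dev));
 *
 * The matching teardown on driver unload is ttm_device_fini() below.
 */

/**
 * ttm_device_fini - tear down a struct ttm_device
 *
 * @bdev: The device to tear down, previously set up by ttm_device_init().
 *
 * Disables the system domain manager, flushes the delayed destroy work
 * and drops the device's reference on the global TTM state.
 */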
void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);

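/**
 * ttm_device_clear_dma_mappings - unpopulate all TTs of a device
 *
 * @bdev: The device whose buffer objects should lose their DMA mappings.
 *
 * Unpopulates the ttm_tt of every pinned and LRU-listed buffer object of
 * @bdev so that no DMA mappings to the underlying device remain, e.g. in
 * preparation for device removal.
 */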
void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	struct ttm_buffer_object *bo;
	unsigned int i, j;

	spin_lock(&bdev->lru_lock);
	while (!list_empty(&bdev->pinned)) {
		bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
		/* Take ref against racing releases once lru_lock is unlocked */
		if (ttm_bo_get_unless_zero(bo)) {
			list_del_init(&bo->lru);
			spin_unlock(&bdev->lru_lock);

			if (bo->ttm)
				ttm_tt_unpopulate(bo->bdev, bo->ttm);

			ttm_bo_put(bo);
			spin_lock(&bdev->lru_lock);
		}
	}

	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			while (!list_empty(&man->lru[j])) {
				bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
				if (ttm_bo_get_unless_zero(bo)) {
					list_del_init(&bo->lru);
					spin_unlock(&bdev->lru_lock);

					if (bo->ttm)
						ttm_tt_unpopulate(bo->bdev, bo->ttm);

					ttm_bo_put(bo);
					spin_lock(&bdev->lru_lock);
				}
			}
		}
	}
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);