1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include "amdgpu_sync.h"
27 #include "amdgpu_object.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_mn.h"
30 #include "amdgpu.h"
31 #include "amdgpu_xgmi.h"
32 #include "kfd_priv.h"
33 #include "kfd_svm.h"
34 #include "kfd_migrate.h"
35
36 #ifdef dev_fmt
37 #undef dev_fmt
38 #endif
39 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
40
41 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
42
43 /* Long enough to ensure no retry fault comes after svm range is restored and
44 * page table is updated.
45 */
46 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING 2000
47
48 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
49 static bool
50 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
51 const struct mmu_notifier_range *range,
52 unsigned long cur_seq);
53 static int
54 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
55 uint64_t *bo_s, uint64_t *bo_l);
56 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
57 .invalidate = svm_range_cpu_invalidate_pagetables,
58 };
59
60 /**
61 * svm_range_unlink - unlink svm_range from lists and interval tree
62 * @prange: svm range structure to be removed
63 *
64 * Remove the svm_range from the svms and svm_bo lists and the svms
65 * interval tree.
66 *
67 * Context: The caller must hold svms->lock
68 */
static void svm_range_unlink(struct svm_range *prange)
70 {
71 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
72 prange, prange->start, prange->last);
73
74 if (prange->svm_bo) {
75 spin_lock(&prange->svm_bo->list_lock);
76 list_del(&prange->svm_bo_list);
77 spin_unlock(&prange->svm_bo->list_lock);
78 }
79
80 list_del(&prange->list);
81 if (prange->it_node.start != 0 && prange->it_node.last != 0)
82 interval_tree_remove(&prange->it_node, &prange->svms->objects);
83 }
84
85 static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
87 {
88 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
89 prange, prange->start, prange->last);
90
91 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
92 prange->start << PAGE_SHIFT,
93 prange->npages << PAGE_SHIFT,
94 &svm_range_mn_ops);
95 }
96
97 /**
98 * svm_range_add_to_svms - add svm range to svms
99 * @prange: svm range structure to be added
100 *
 * Add the svm range to the svms interval tree and linked list
102 *
103 * Context: The caller must hold svms->lock
104 */
static void svm_range_add_to_svms(struct svm_range *prange)
106 {
107 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
108 prange, prange->start, prange->last);
109
110 list_add_tail(&prange->list, &prange->svms->list);
111 prange->it_node.start = prange->start;
112 prange->it_node.last = prange->last;
113 interval_tree_insert(&prange->it_node, &prange->svms->objects);
114 }
115
static void svm_range_remove_notifier(struct svm_range *prange)
117 {
118 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
119 prange->svms, prange,
120 prange->notifier.interval_tree.start >> PAGE_SHIFT,
121 prange->notifier.interval_tree.last >> PAGE_SHIFT);
122
123 if (prange->notifier.interval_tree.start != 0 &&
124 prange->notifier.interval_tree.last != 0)
125 mmu_interval_notifier_remove(&prange->notifier);
126 }
127
128 static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
130 {
131 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
132 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
133 }
134
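/* DMA-map the pages backing part of @prange for device @adev (GPU @gpuidx).
 * System memory pages are dma_map_page()'d; pages already resident in VRAM
 * (zone device pages) are instead encoded as a VRAM address tagged with
 * SVM_RANGE_VRAM_DOMAIN. Allocates the per-GPU dma_addr array on first use.
 */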
135 static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
137 unsigned long offset, unsigned long npages,
138 unsigned long *hmm_pfns, uint32_t gpuidx)
139 {
140 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
141 dma_addr_t *addr = prange->dma_addr[gpuidx];
142 struct device *dev = adev->dev;
143 struct page *page;
144 int i, r;
145
146 if (!addr) {
147 addr = kvmalloc_array(prange->npages, sizeof(*addr),
148 GFP_KERNEL | __GFP_ZERO);
149 if (!addr)
150 return -ENOMEM;
151 prange->dma_addr[gpuidx] = addr;
152 }
153
154 addr += offset;
155 for (i = 0; i < npages; i++) {
156 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
157 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
158
159 page = hmm_pfn_to_page(hmm_pfns[i]);
160 if (is_zone_device_page(page)) {
161 struct amdgpu_device *bo_adev =
162 amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
163
164 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
165 bo_adev->vm_manager.vram_base_offset -
166 bo_adev->kfd.dev->pgmap.range.start;
167 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
168 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
169 continue;
170 }
171 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
172 r = dma_mapping_error(dev, addr[i]);
173 if (r) {
174 dev_err(dev, "failed %d dma_map_page\n", r);
175 return r;
176 }
177 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
178 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
179 }
180 return 0;
181 }
182
183 static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
185 unsigned long offset, unsigned long npages,
186 unsigned long *hmm_pfns)
187 {
188 struct kfd_process *p;
189 uint32_t gpuidx;
190 int r;
191
192 p = container_of(prange->svms, struct kfd_process, svms);
193
194 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
195 struct kfd_process_device *pdd;
196 struct amdgpu_device *adev;
197
198 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
199 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
200 if (!pdd) {
201 pr_debug("failed to find device idx %d\n", gpuidx);
202 return -EINVAL;
203 }
204 adev = (struct amdgpu_device *)pdd->dev->kgd;
205
206 r = svm_range_dma_map_dev(adev, prange, offset, npages,
207 hmm_pfns, gpuidx);
208 if (r)
209 break;
210 }
211
212 return r;
213 }
214
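/* Unmap @npages previously DMA-mapped addresses starting at @offset in
 * @dma_addr; entries that are unset or VRAM-tagged are skipped.
 */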
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
216 unsigned long offset, unsigned long npages)
217 {
218 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
219 int i;
220
221 if (!dma_addr)
222 return;
223
224 for (i = offset; i < offset + npages; i++) {
225 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
226 continue;
227 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
228 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
229 dma_addr[i] = 0;
230 }
231 }
232
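/* For every GPU instance with a dma_addr array, unmap all pages of @prange
 * and free the array.
 */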
void svm_range_free_dma_mappings(struct svm_range *prange)
234 {
235 struct kfd_process_device *pdd;
236 dma_addr_t *dma_addr;
237 struct device *dev;
238 struct kfd_process *p;
239 uint32_t gpuidx;
240
241 p = container_of(prange->svms, struct kfd_process, svms);
242
243 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
244 dma_addr = prange->dma_addr[gpuidx];
245 if (!dma_addr)
246 continue;
247
248 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
249 if (!pdd) {
250 pr_debug("failed to find device idx %d\n", gpuidx);
251 continue;
252 }
253 dev = &pdd->dev->pdev->dev;
254 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
255 kvfree(dma_addr);
256 prange->dma_addr[gpuidx] = NULL;
257 }
258 }
259
static void svm_range_free(struct svm_range *prange)
261 {
262 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
263 prange->start, prange->last);
264
265 svm_range_vram_node_free(prange);
266 svm_range_free_dma_mappings(prange);
267 mutex_destroy(&prange->lock);
268 mutex_destroy(&prange->migrate_mutex);
269 kfree(prange);
270 }
271
272 static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
274 uint8_t *granularity, uint32_t *flags)
275 {
276 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
277 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
278 *granularity = 9;
279 *flags =
280 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
281 }
282
283 static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
285 uint64_t last)
286 {
287 uint64_t size = last - start + 1;
288 struct svm_range *prange;
289 struct kfd_process *p;
290
291 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
292 if (!prange)
293 return NULL;
294 prange->npages = size;
295 prange->svms = svms;
296 prange->start = start;
297 prange->last = last;
298 INIT_LIST_HEAD(&prange->list);
299 INIT_LIST_HEAD(&prange->update_list);
300 INIT_LIST_HEAD(&prange->remove_list);
301 INIT_LIST_HEAD(&prange->insert_list);
302 INIT_LIST_HEAD(&prange->svm_bo_list);
303 INIT_LIST_HEAD(&prange->deferred_list);
304 INIT_LIST_HEAD(&prange->child_list);
305 atomic_set(&prange->invalid, 0);
306 prange->validate_timestamp = 0;
307 mutex_init(&prange->migrate_mutex);
308 mutex_init(&prange->lock);
309
310 p = container_of(svms, struct kfd_process, svms);
311 if (p->xnack_enabled)
312 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
313 MAX_GPU_INSTANCE);
314
315 svm_range_set_default_attributes(&prange->preferred_loc,
316 &prange->prefetch_loc,
317 &prange->granularity, &prange->flags);
318
319 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
320
321 return prange;
322 }
323
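/* Take a reference on @svm_bo unless its refcount has already dropped to
 * zero; returns true if a reference was acquired.
 */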
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
325 {
326 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
327 return false;
328
329 return true;
330 }
331
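/* kref release callback: detach all ranges still linked to this svm_bo,
 * signal and synchronize the eviction fence if necessary, then drop the BO
 * reference and free the structure.
 */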
static void svm_range_bo_release(struct kref *kref)
333 {
334 struct svm_range_bo *svm_bo;
335
336 svm_bo = container_of(kref, struct svm_range_bo, kref);
337 spin_lock(&svm_bo->list_lock);
338 while (!list_empty(&svm_bo->range_list)) {
339 struct svm_range *prange =
340 list_first_entry(&svm_bo->range_list,
341 struct svm_range, svm_bo_list);
342 /* list_del_init tells a concurrent svm_range_vram_node_new when
343 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
344 */
345 list_del_init(&prange->svm_bo_list);
346 spin_unlock(&svm_bo->list_lock);
347
348 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
349 prange->start, prange->last);
350 mutex_lock(&prange->lock);
351 prange->svm_bo = NULL;
352 mutex_unlock(&prange->lock);
353
354 spin_lock(&svm_bo->list_lock);
355 }
356 spin_unlock(&svm_bo->list_lock);
357 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
358 /* We're not in the eviction worker.
359 * Signal the fence and synchronize with any
360 * pending eviction work.
361 */
362 dma_fence_signal(&svm_bo->eviction_fence->base);
363 cancel_work_sync(&svm_bo->eviction_work);
364 }
365 dma_fence_put(&svm_bo->eviction_fence->base);
366 amdgpu_bo_unref(&svm_bo->bo);
367 kfree(svm_bo);
368 }
369
void svm_range_bo_unref(struct svm_range_bo *svm_bo)
371 {
372 if (!svm_bo)
373 return;
374
375 kref_put(&svm_bo->kref, svm_range_bo_release);
376 }
377
378 static bool
svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
380 {
381 struct amdgpu_device *bo_adev;
382
383 mutex_lock(&prange->lock);
384 if (!prange->svm_bo) {
385 mutex_unlock(&prange->lock);
386 return false;
387 }
388 if (prange->ttm_res) {
389 /* We still have a reference, all is well */
390 mutex_unlock(&prange->lock);
391 return true;
392 }
393 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
394 /*
395 * Migrate from GPU to GPU, remove range from source bo_adev
396 * svm_bo range list, and return false to allocate svm_bo from
397 * destination adev.
398 */
399 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
400 if (bo_adev != adev) {
401 mutex_unlock(&prange->lock);
402
403 spin_lock(&prange->svm_bo->list_lock);
404 list_del_init(&prange->svm_bo_list);
405 spin_unlock(&prange->svm_bo->list_lock);
406
407 svm_range_bo_unref(prange->svm_bo);
408 return false;
409 }
410 if (READ_ONCE(prange->svm_bo->evicting)) {
411 struct dma_fence *f;
412 struct svm_range_bo *svm_bo;
413 /* The BO is getting evicted,
414 * we need to get a new one
415 */
416 mutex_unlock(&prange->lock);
417 svm_bo = prange->svm_bo;
418 f = dma_fence_get(&svm_bo->eviction_fence->base);
419 svm_range_bo_unref(prange->svm_bo);
420 /* wait for the fence to avoid long spin-loop
421 * at list_empty_careful
422 */
423 dma_fence_wait(f, false);
424 dma_fence_put(f);
425 } else {
426 /* The BO was still around and we got
427 * a new reference to it
428 */
429 mutex_unlock(&prange->lock);
430 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
431 prange->svms, prange->start, prange->last);
432
433 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
434 return true;
435 }
436
437 } else {
438 mutex_unlock(&prange->lock);
439 }
440
441 /* We need a new svm_bo. Spin-loop to wait for concurrent
442 * svm_range_bo_release to finish removing this range from
443 * its range list. After this, it is safe to reuse the
444 * svm_bo pointer and svm_bo_list head.
445 */
446 while (!list_empty_careful(&prange->svm_bo_list))
447 ;
448
449 return false;
450 }
451
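/* Allocate an empty svm_range_bo with one reference and an initialized
 * range list.
 */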
static struct svm_range_bo *svm_range_bo_new(void)
453 {
454 struct svm_range_bo *svm_bo;
455
456 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
457 if (!svm_bo)
458 return NULL;
459
460 kref_init(&svm_bo->kref);
461 INIT_LIST_HEAD(&svm_bo->range_list);
462 spin_lock_init(&svm_bo->list_lock);
463
464 return svm_bo;
465 }
466
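/* Allocate or reuse a VRAM BO backing @prange on @adev. An existing, still
 * valid svm_bo is reused when possible (see svm_range_validate_svm_bo);
 * otherwise a new BO is created, optionally cleared, and fenced with a new
 * eviction fence.
 */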
467 int
svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
469 bool clear)
470 {
471 struct amdgpu_bo_param bp;
472 struct svm_range_bo *svm_bo;
473 struct amdgpu_bo_user *ubo;
474 struct amdgpu_bo *bo;
475 struct kfd_process *p;
476 struct mm_struct *mm;
477 int r;
478
479 p = container_of(prange->svms, struct kfd_process, svms);
480 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
481 prange->start, prange->last);
482
483 if (svm_range_validate_svm_bo(adev, prange))
484 return 0;
485
486 svm_bo = svm_range_bo_new();
487 if (!svm_bo) {
488 pr_debug("failed to alloc svm bo\n");
489 return -ENOMEM;
490 }
491 mm = get_task_mm(p->lead_thread);
492 if (!mm) {
493 pr_debug("failed to get mm\n");
494 kfree(svm_bo);
495 return -ESRCH;
496 }
497 svm_bo->svms = prange->svms;
498 svm_bo->eviction_fence =
499 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
500 mm,
501 svm_bo);
502 mmput(mm);
503 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
504 svm_bo->evicting = 0;
505 memset(&bp, 0, sizeof(bp));
506 bp.size = prange->npages * PAGE_SIZE;
507 bp.byte_align = PAGE_SIZE;
508 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
509 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
510 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
511 bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
512 bp.type = ttm_bo_type_device;
513 bp.resv = NULL;
514
515 r = amdgpu_bo_create_user(adev, &bp, &ubo);
516 if (r) {
517 pr_debug("failed %d to create bo\n", r);
518 goto create_bo_failed;
519 }
520 bo = &ubo->bo;
521 r = amdgpu_bo_reserve(bo, true);
522 if (r) {
523 pr_debug("failed %d to reserve bo\n", r);
524 goto reserve_bo_failed;
525 }
526
527 r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
528 if (r) {
529 pr_debug("failed %d to reserve bo\n", r);
530 amdgpu_bo_unreserve(bo);
531 goto reserve_bo_failed;
532 }
533 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
534
535 amdgpu_bo_unreserve(bo);
536
537 svm_bo->bo = bo;
538 prange->svm_bo = svm_bo;
539 prange->ttm_res = bo->tbo.resource;
540 prange->offset = 0;
541
542 spin_lock(&svm_bo->list_lock);
543 list_add(&prange->svm_bo_list, &svm_bo->range_list);
544 spin_unlock(&svm_bo->list_lock);
545
546 return 0;
547
548 reserve_bo_failed:
549 amdgpu_bo_unref(&bo);
550 create_bo_failed:
551 dma_fence_put(&svm_bo->eviction_fence->base);
552 kfree(svm_bo);
553 prange->ttm_res = NULL;
554
555 return r;
556 }
557
void svm_range_vram_node_free(struct svm_range *prange)
559 {
560 svm_range_bo_unref(prange->svm_bo);
561 prange->ttm_res = NULL;
562 }
563
564 struct amdgpu_device *
svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
566 {
567 struct kfd_process_device *pdd;
568 struct kfd_process *p;
569 int32_t gpu_idx;
570
571 p = container_of(prange->svms, struct kfd_process, svms);
572
573 gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
574 if (gpu_idx < 0) {
575 pr_debug("failed to get device by id 0x%x\n", gpu_id);
576 return NULL;
577 }
578 pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
579 if (!pdd) {
580 pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
581 return NULL;
582 }
583
584 return (struct amdgpu_device *)pdd->dev->kgd;
585 }
586
587 struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
589 {
590 struct kfd_process *p;
591 int32_t gpu_idx, gpuid;
592 int r;
593
594 p = container_of(prange->svms, struct kfd_process, svms);
595
596 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
597 if (r) {
598 pr_debug("failed to get device id by adev %p\n", adev);
599 return NULL;
600 }
601
602 return kfd_process_device_from_gpuidx(p, gpu_idx);
603 }
604
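/* amdgpu_vm_validate_pt_bos() callback: validate a BO into the VRAM domain. */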
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
606 {
607 struct ttm_operation_ctx ctx = { false, false };
608
609 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
610
611 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
612 }
613
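/* Validate the user-supplied attribute array: reject unknown attribute types
 * and GPU IDs that do not resolve to a supported device of process @p.
 */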
614 static int
svm_range_check_attr(struct kfd_process *p,
616 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
617 {
618 uint32_t i;
619
620 for (i = 0; i < nattr; i++) {
621 uint32_t val = attrs[i].value;
622 int gpuidx = MAX_GPU_INSTANCE;
623
624 switch (attrs[i].type) {
625 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
626 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
627 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
628 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
629 break;
630 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
631 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
632 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
633 break;
634 case KFD_IOCTL_SVM_ATTR_ACCESS:
635 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
636 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
637 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
638 break;
639 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
640 break;
641 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
642 break;
643 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
644 break;
645 default:
646 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
647 return -EINVAL;
648 }
649
650 if (gpuidx < 0) {
651 pr_debug("no GPU 0x%x found\n", val);
652 return -EINVAL;
653 } else if (gpuidx < MAX_GPU_INSTANCE &&
654 !test_bit(gpuidx, p->svms.bitmap_supported)) {
655 pr_debug("GPU 0x%x not supported\n", val);
656 return -EINVAL;
657 }
658 }
659
660 return 0;
661 }
662
663 static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
665 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
666 {
667 uint32_t i;
668 int gpuidx;
669
670 for (i = 0; i < nattr; i++) {
671 switch (attrs[i].type) {
672 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
673 prange->preferred_loc = attrs[i].value;
674 break;
675 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
676 prange->prefetch_loc = attrs[i].value;
677 break;
678 case KFD_IOCTL_SVM_ATTR_ACCESS:
679 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
680 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
681 gpuidx = kfd_process_gpuidx_from_gpuid(p,
682 attrs[i].value);
683 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
684 bitmap_clear(prange->bitmap_access, gpuidx, 1);
685 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
686 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
687 bitmap_set(prange->bitmap_access, gpuidx, 1);
688 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
689 } else {
690 bitmap_clear(prange->bitmap_access, gpuidx, 1);
691 bitmap_set(prange->bitmap_aip, gpuidx, 1);
692 }
693 break;
694 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
695 prange->flags |= attrs[i].value;
696 break;
697 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
698 prange->flags &= ~attrs[i].value;
699 break;
700 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
701 prange->granularity = attrs[i].value;
702 break;
703 default:
704 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
705 }
706 }
707 }
708
709 /**
710 * svm_range_debug_dump - print all range information from svms
711 * @svms: svm range list header
712 *
713 * debug output svm range start, end, prefetch location from svms
 * interval tree and linked list
715 *
716 * Context: The caller must hold svms->lock
717 */
static void svm_range_debug_dump(struct svm_range_list *svms)
719 {
720 struct interval_tree_node *node;
721 struct svm_range *prange;
722
723 pr_debug("dump svms 0x%p list\n", svms);
724 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
725
726 list_for_each_entry(prange, &svms->list, list) {
727 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
728 prange, prange->start, prange->npages,
729 prange->start + prange->npages - 1,
730 prange->actual_loc);
731 }
732
733 pr_debug("dump svms 0x%p interval tree\n", svms);
734 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
735 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
736 while (node) {
737 prange = container_of(node, struct svm_range, it_node);
738 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
739 prange, prange->start, prange->npages,
740 prange->start + prange->npages - 1,
741 prange->actual_loc);
742 node = interval_tree_iter_next(node, 0, ~0ULL);
743 }
744 }
745
746 static bool
svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
748 {
749 return (old->prefetch_loc == new->prefetch_loc &&
750 old->flags == new->flags &&
751 old->granularity == new->granularity);
752 }
753
754 static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
756 uint64_t old_start, uint64_t old_n,
757 uint64_t new_start, uint64_t new_n)
758 {
759 unsigned char *new, *old, *pold;
760 uint64_t d;
761
762 if (!ppold)
763 return 0;
764 pold = *(unsigned char **)ppold;
765 if (!pold)
766 return 0;
767
768 new = kvmalloc_array(new_n, size, GFP_KERNEL);
769 if (!new)
770 return -ENOMEM;
771
772 d = (new_start - old_start) * size;
773 memcpy(new, pold + d, new_n * size);
774
775 old = kvmalloc_array(old_n, size, GFP_KERNEL);
776 if (!old) {
777 kvfree(new);
778 return -ENOMEM;
779 }
780
781 d = (new_start == old_start) ? new_n * size : 0;
782 memcpy(old, pold + d, old_n * size);
783
784 kvfree(pold);
785 *(void **)ppold = old;
786 *(void **)ppnew = new;
787
788 return 0;
789 }
790
791 static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
793 uint64_t start, uint64_t last)
794 {
795 uint64_t npages = last - start + 1;
796 int i, r;
797
798 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
799 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
800 sizeof(*old->dma_addr[i]), old->start,
801 npages, new->start, new->npages);
802 if (r)
803 return r;
804 }
805
806 return 0;
807 }
808
809 static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
811 uint64_t start, uint64_t last)
812 {
813 uint64_t npages = last - start + 1;
814
815 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
816 new->svms, new, new->start, start, last);
817
818 if (new->start == old->start) {
819 new->offset = old->offset;
820 old->offset += new->npages;
821 } else {
822 new->offset = old->offset + npages;
823 }
824
825 new->svm_bo = svm_range_bo_ref(old->svm_bo);
826 new->ttm_res = old->ttm_res;
827
828 spin_lock(&new->svm_bo->list_lock);
829 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
830 spin_unlock(&new->svm_bo->list_lock);
831
832 return 0;
833 }
834
835 /**
836 * svm_range_split_adjust - split range and adjust
837 *
838 * @new: new range
839 * @old: the old range
840 * @start: the old range adjust to start address in pages
841 * @last: the old range adjust to last address in pages
842 *
 * Copy the system memory dma_addr or VRAM ttm_res from the old range to the
 * new range, covering new->start for new->npages pages; the remaining old
 * range is adjusted to [start, last]
846 *
847 * Return:
848 * 0 - OK, -ENOMEM - out of memory
849 */
850 static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
852 uint64_t start, uint64_t last)
853 {
854 int r;
855
856 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
857 new->svms, new->start, old->start, old->last, start, last);
858
859 if (new->start < old->start ||
860 new->last > old->last) {
861 WARN_ONCE(1, "invalid new range start or last\n");
862 return -EINVAL;
863 }
864
865 r = svm_range_split_pages(new, old, start, last);
866 if (r)
867 return r;
868
869 if (old->actual_loc && old->ttm_res) {
870 r = svm_range_split_nodes(new, old, start, last);
871 if (r)
872 return r;
873 }
874
875 old->npages = last - start + 1;
876 old->start = start;
877 old->last = last;
878 new->flags = old->flags;
879 new->preferred_loc = old->preferred_loc;
880 new->prefetch_loc = old->prefetch_loc;
881 new->actual_loc = old->actual_loc;
882 new->granularity = old->granularity;
883 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
884 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
885
886 return 0;
887 }
888
889 /**
 * svm_range_split - split a range into two ranges
891 *
892 * @prange: the svm range to split
893 * @start: the remaining range start address in pages
894 * @last: the remaining range last address in pages
895 * @new: the result new range generated
896 *
897 * Two cases only:
898 * case 1: if start == prange->start
899 * prange ==> prange[start, last]
900 * new range [last + 1, prange->last]
901 *
902 * case 2: if last == prange->last
903 * prange ==> prange[start, last]
904 * new range [prange->start, start - 1]
905 *
906 * Return:
907 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
908 */
909 static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
911 struct svm_range **new)
912 {
913 uint64_t old_start = prange->start;
914 uint64_t old_last = prange->last;
915 struct svm_range_list *svms;
916 int r = 0;
917
918 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
919 old_start, old_last, start, last);
920
921 if (old_start != start && old_last != last)
922 return -EINVAL;
923 if (start < old_start || last > old_last)
924 return -EINVAL;
925
926 svms = prange->svms;
927 if (old_start == start)
928 *new = svm_range_new(svms, last + 1, old_last);
929 else
930 *new = svm_range_new(svms, old_start, start - 1);
931 if (!*new)
932 return -ENOMEM;
933
934 r = svm_range_split_adjust(*new, prange, start, last);
935 if (r) {
936 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
937 r, old_start, old_last, start, last);
938 svm_range_free(*new);
939 *new = NULL;
940 }
941
942 return r;
943 }
944
945 static int
svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
947 uint64_t new_last, struct list_head *insert_list)
948 {
949 struct svm_range *tail;
950 int r = svm_range_split(prange, prange->start, new_last, &tail);
951
952 if (!r)
953 list_add(&tail->insert_list, insert_list);
954 return r;
955 }
956
957 static int
svm_range_split_head(struct svm_range *prange, struct svm_range *new,
959 uint64_t new_start, struct list_head *insert_list)
960 {
961 struct svm_range *head;
962 int r = svm_range_split(prange, new_start, prange->last, &head);
963
964 if (!r)
965 list_add(&head->insert_list, insert_list);
966 return r;
967 }
968
969 static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
971 struct svm_range *pchild, enum svm_work_list_ops op)
972 {
973 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
974 pchild, pchild->start, pchild->last, prange, op);
975
976 pchild->work_item.mm = mm;
977 pchild->work_item.op = op;
978 list_add_tail(&pchild->child_list, &prange->child_list);
979 }
980
981 /**
982 * svm_range_split_by_granularity - collect ranges within granularity boundary
983 *
984 * @p: the process with svms list
985 * @mm: mm structure
986 * @addr: the vm fault address in pages, to split the prange
987 * @parent: parent range if prange is from child list
988 * @prange: prange to split
989 *
990 * Trims @prange to be a single aligned block of prange->granularity if
991 * possible. The head and tail are added to the child_list in @parent.
992 *
993 * Context: caller must hold mmap_read_lock and prange->lock
994 *
995 * Return:
996 * 0 - OK, otherwise error code
997 */
998 int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1000 unsigned long addr, struct svm_range *parent,
1001 struct svm_range *prange)
1002 {
1003 struct svm_range *head, *tail;
1004 unsigned long start, last, size;
1005 int r;
1006
/* Align the split range start and size to the granularity size, so a single
 * PTE can be used for the whole range. This reduces the number of PTEs
 * updated and the L1 TLB space used for translation.
 */
1011 size = 1UL << prange->granularity;
1012 start = ALIGN_DOWN(addr, size);
1013 last = ALIGN(addr + 1, size) - 1;
1014
1015 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1016 prange->svms, prange->start, prange->last, start, last, size);
1017
1018 if (start > prange->start) {
1019 r = svm_range_split(prange, start, prange->last, &head);
1020 if (r)
1021 return r;
1022 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1023 }
1024
1025 if (last < prange->last) {
1026 r = svm_range_split(prange, prange->start, last, &tail);
1027 if (r)
1028 return r;
1029 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1030 }
1031
1032 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1033 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1034 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1035 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1036 prange, prange->start, prange->last,
1037 SVM_OP_ADD_RANGE_AND_MAP);
1038 }
1039 return 0;
1040 }
1041
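/* Compute the GPU PTE flags for mapping @prange on @adev in @domain. The
 * memory type (MTYPE) and snoop setting depend on the ASIC and on whether
 * the VRAM BO is local to @adev or reached over XGMI.
 */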
1042 static uint64_t
svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
1044 int domain)
1045 {
1046 struct amdgpu_device *bo_adev;
1047 uint32_t flags = prange->flags;
1048 uint32_t mapping_flags = 0;
1049 uint64_t pte_flags;
1050 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1051 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1052
1053 if (domain == SVM_RANGE_VRAM_DOMAIN)
1054 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1055
1056 switch (adev->asic_type) {
1057 case CHIP_ARCTURUS:
1058 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1059 if (bo_adev == adev) {
1060 mapping_flags |= coherent ?
1061 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1062 } else {
1063 mapping_flags |= coherent ?
1064 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1065 if (amdgpu_xgmi_same_hive(adev, bo_adev))
1066 snoop = true;
1067 }
1068 } else {
1069 mapping_flags |= coherent ?
1070 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1071 }
1072 break;
1073 case CHIP_ALDEBARAN:
1074 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1075 if (bo_adev == adev) {
1076 mapping_flags |= coherent ?
1077 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1078 if (adev->gmc.xgmi.connected_to_cpu)
1079 snoop = true;
1080 } else {
1081 mapping_flags |= coherent ?
1082 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1083 if (amdgpu_xgmi_same_hive(adev, bo_adev))
1084 snoop = true;
1085 }
1086 } else {
1087 mapping_flags |= coherent ?
1088 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1089 }
1090 break;
1091 default:
1092 mapping_flags |= coherent ?
1093 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1094 }
1095
1096 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1097
1098 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1099 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1100 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1101 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1102
1103 pte_flags = AMDGPU_PTE_VALID;
1104 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1105 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1106
1107 pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
1108 return pte_flags;
1109 }
1110
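/* Clear the GPU page table entries for [@start, @last] in @vm; @fence, if
 * provided, returns the fence of the page table update.
 */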
1111 static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1113 uint64_t start, uint64_t last,
1114 struct dma_fence **fence)
1115 {
1116 uint64_t init_pte_value = 0;
1117
1118 pr_debug("[0x%llx 0x%llx]\n", start, last);
1119
1120 return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
1121 start, last, init_pte_value, 0,
1122 NULL, NULL, fence, NULL);
1123 }
1124
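/* Unmap [@start, @last] of @prange from all GPUs with access to the range
 * and issue a heavyweight TLB flush on each of them.
 */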
1125 static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1127 unsigned long last)
1128 {
1129 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1130 struct kfd_process_device *pdd;
1131 struct dma_fence *fence = NULL;
1132 struct amdgpu_device *adev;
1133 struct kfd_process *p;
1134 uint32_t gpuidx;
1135 int r = 0;
1136
1137 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1138 MAX_GPU_INSTANCE);
1139 p = container_of(prange->svms, struct kfd_process, svms);
1140
1141 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1142 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1143 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1144 if (!pdd) {
1145 pr_debug("failed to find device idx %d\n", gpuidx);
1146 return -EINVAL;
1147 }
1148 adev = (struct amdgpu_device *)pdd->dev->kgd;
1149
1150 r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
1151 start, last, &fence);
1152 if (r)
1153 break;
1154
1155 if (fence) {
1156 r = dma_fence_wait(fence, false);
1157 dma_fence_put(fence);
1158 fence = NULL;
1159 if (r)
1160 break;
1161 }
1162 amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
1163 p->pasid, TLB_FLUSH_HEAVYWEIGHT);
1164 }
1165
1166 return r;
1167 }
1168
1169 static int
svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1171 struct svm_range *prange, unsigned long offset,
1172 unsigned long npages, bool readonly, dma_addr_t *dma_addr,
1173 struct amdgpu_device *bo_adev, struct dma_fence **fence)
1174 {
1175 struct amdgpu_bo_va bo_va;
1176 bool table_freed = false;
1177 uint64_t pte_flags;
1178 unsigned long last_start;
1179 int last_domain;
1180 int r = 0;
1181 int64_t i, j;
1182
1183 last_start = prange->start + offset;
1184
1185 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1186 last_start, last_start + npages - 1, readonly);
1187
1188 if (prange->svm_bo && prange->ttm_res)
1189 bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
1190
1191 for (i = offset; i < offset + npages; i++) {
1192 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1193 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1194
1195 /* Collect all pages in the same address range and memory domain
1196 * that can be mapped with a single call to update mapping.
1197 */
1198 if (i < offset + npages - 1 &&
1199 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1200 continue;
1201
1202 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1203 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1204
1205 pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
1206 if (readonly)
1207 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1208
1209 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1210 prange->svms, last_start, prange->start + i,
1211 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1212 pte_flags);
1213
1214 r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
1215 NULL, last_start,
1216 prange->start + i, pte_flags,
1217 last_start - prange->start,
1218 NULL, dma_addr,
1219 &vm->last_update,
1220 &table_freed);
1221
1222 for (j = last_start - prange->start; j <= i; j++)
1223 dma_addr[j] |= last_domain;
1224
1225 if (r) {
1226 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1227 goto out;
1228 }
1229 last_start = prange->start + i + 1;
1230 }
1231
1232 r = amdgpu_vm_update_pdes(adev, vm, false);
1233 if (r) {
1234 pr_debug("failed %d to update directories 0x%lx\n", r,
1235 prange->start);
1236 goto out;
1237 }
1238
1239 if (fence)
1240 *fence = dma_fence_get(vm->last_update);
1241
1242 if (table_freed) {
1243 struct kfd_process *p;
1244
1245 p = container_of(prange->svms, struct kfd_process, svms);
1246 amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
1247 p->pasid, TLB_FLUSH_LEGACY);
1248 }
1249 out:
1250 return r;
1251 }
1252
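/* Map @npages pages of @prange, starting at @offset, on all GPUs set in
 * @bitmap, optionally waiting for the page table update fences.
 */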
1253 static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1255 unsigned long npages, bool readonly,
1256 unsigned long *bitmap, bool wait)
1257 {
1258 struct kfd_process_device *pdd;
1259 struct amdgpu_device *bo_adev;
1260 struct amdgpu_device *adev;
1261 struct kfd_process *p;
1262 struct dma_fence *fence = NULL;
1263 uint32_t gpuidx;
1264 int r = 0;
1265
1266 if (prange->svm_bo && prange->ttm_res)
1267 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1268 else
1269 bo_adev = NULL;
1270
1271 p = container_of(prange->svms, struct kfd_process, svms);
1272 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1273 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1274 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1275 if (!pdd) {
1276 pr_debug("failed to find device idx %d\n", gpuidx);
1277 return -EINVAL;
1278 }
1279 adev = (struct amdgpu_device *)pdd->dev->kgd;
1280
1281 pdd = kfd_bind_process_to_device(pdd->dev, p);
1282 if (IS_ERR(pdd))
1283 return -EINVAL;
1284
1285 if (bo_adev && adev != bo_adev &&
1286 !amdgpu_xgmi_same_hive(adev, bo_adev)) {
1287 pr_debug("cannot map to device idx %d\n", gpuidx);
1288 continue;
1289 }
1290
1291 r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
1292 prange, offset, npages, readonly,
1293 prange->dma_addr[gpuidx],
1294 bo_adev, wait ? &fence : NULL);
1295 if (r)
1296 break;
1297
1298 if (fence) {
1299 r = dma_fence_wait(fence, false);
1300 dma_fence_put(fence);
1301 fence = NULL;
1302 if (r) {
1303 pr_debug("failed %d to dma fence wait\n", r);
1304 break;
1305 }
1306 }
1307 }
1308
1309 return r;
1310 }
1311
1312 struct svm_validate_context {
1313 struct kfd_process *process;
1314 struct svm_range *prange;
1315 bool intr;
1316 unsigned long bitmap[MAX_GPU_INSTANCE];
1317 struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
1318 struct list_head validate_list;
1319 struct ww_acquire_ctx ticket;
1320 };
1321
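/* Reserve the page table root BOs of all GPUs in ctx->bitmap and validate
 * their per-VM BOs via svm_range_bo_validate() so everything needed for the
 * mapping stays resident.
 */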
static int svm_range_reserve_bos(struct svm_validate_context *ctx)
1323 {
1324 struct kfd_process_device *pdd;
1325 struct amdgpu_device *adev;
1326 struct amdgpu_vm *vm;
1327 uint32_t gpuidx;
1328 int r;
1329
1330 INIT_LIST_HEAD(&ctx->validate_list);
1331 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1332 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1333 if (!pdd) {
1334 pr_debug("failed to find device idx %d\n", gpuidx);
1335 return -EINVAL;
1336 }
1337 adev = (struct amdgpu_device *)pdd->dev->kgd;
1338 vm = drm_priv_to_vm(pdd->drm_priv);
1339
1340 ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
1341 ctx->tv[gpuidx].num_shared = 4;
1342 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
1343 }
1344
1345 r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
1346 ctx->intr, NULL);
1347 if (r) {
1348 pr_debug("failed %d to reserve bo\n", r);
1349 return r;
1350 }
1351
1352 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1353 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1354 if (!pdd) {
1355 pr_debug("failed to find device idx %d\n", gpuidx);
1356 r = -EINVAL;
1357 goto unreserve_out;
1358 }
1359 adev = (struct amdgpu_device *)pdd->dev->kgd;
1360
1361 r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
1362 svm_range_bo_validate, NULL);
1363 if (r) {
1364 pr_debug("failed %d validate pt bos\n", r);
1365 goto unreserve_out;
1366 }
1367 }
1368
1369 return 0;
1370
1371 unreserve_out:
1372 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1373 return r;
1374 }
1375
static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1377 {
1378 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1379 }
1380
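/* Return the HMM page owner token (SVM_ADEV_PGMAP_OWNER) of the GPU at
 * @gpuidx, used when faulting pages to identify device memory that this GPU
 * can access in place.
 */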
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1382 {
1383 struct kfd_process_device *pdd;
1384 struct amdgpu_device *adev;
1385
1386 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1387 adev = (struct amdgpu_device *)pdd->dev->kgd;
1388
1389 return SVM_ADEV_PGMAP_OWNER(adev);
1390 }
1391
1392 /*
1393 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1394 *
1395 * To prevent concurrent destruction or change of range attributes, the
1396 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1397 * because that would block concurrent evictions and lead to deadlocks. To
1398 * serialize concurrent migrations or validations of the same range, the
1399 * prange->migrate_mutex must be held.
1400 *
1401 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
1403 *
1404 * The following sequence ensures race-free validation and GPU mapping:
1405 *
1406 * 1. Reserve page table (and SVM BO if range is in VRAM)
1407 * 2. hmm_range_fault to get page addresses (if system memory)
1408 * 3. DMA-map pages (if system memory)
1409 * 4-a. Take notifier lock
1410 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1411 * 4-c. Check that the range was not split or otherwise invalidated
1412 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
1414 * 5. Release page table (and SVM BO) reservation
1415 */
static int svm_range_validate_and_map(struct mm_struct *mm,
1417 struct svm_range *prange,
1418 int32_t gpuidx, bool intr, bool wait)
1419 {
1420 struct svm_validate_context ctx;
1421 unsigned long start, end, addr;
1422 struct kfd_process *p;
1423 void *owner;
1424 int32_t idx;
1425 int r = 0;
1426
1427 ctx.process = container_of(prange->svms, struct kfd_process, svms);
1428 ctx.prange = prange;
1429 ctx.intr = intr;
1430
1431 if (gpuidx < MAX_GPU_INSTANCE) {
1432 bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
1433 bitmap_set(ctx.bitmap, gpuidx, 1);
1434 } else if (ctx.process->xnack_enabled) {
1435 bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1436
/* If the range was prefetched to a GPU, or a GPU retry fault migrated
 * it there, and that GPU has the ACCESS attribute for the range, create
 * the mapping on that GPU.
 */
1441 if (prange->actual_loc) {
1442 gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
1443 prange->actual_loc);
1444 if (gpuidx < 0) {
1445 WARN_ONCE(1, "failed get device by id 0x%x\n",
1446 prange->actual_loc);
1447 return -EINVAL;
1448 }
1449 if (test_bit(gpuidx, prange->bitmap_access))
1450 bitmap_set(ctx.bitmap, gpuidx, 1);
1451 }
1452 } else {
1453 bitmap_or(ctx.bitmap, prange->bitmap_access,
1454 prange->bitmap_aip, MAX_GPU_INSTANCE);
1455 }
1456
1457 if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
1458 return 0;
1459
1460 if (prange->actual_loc && !prange->ttm_res) {
1461 /* This should never happen. actual_loc gets set by
1462 * svm_migrate_ram_to_vram after allocating a BO.
1463 */
1464 WARN_ONCE(1, "VRAM BO missing during validation\n");
1465 return -EINVAL;
1466 }
1467
1468 svm_range_reserve_bos(&ctx);
1469
1470 p = container_of(prange->svms, struct kfd_process, svms);
1471 owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
1472 MAX_GPU_INSTANCE));
1473 for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
1474 if (kfd_svm_page_owner(p, idx) != owner) {
1475 owner = NULL;
1476 break;
1477 }
1478 }
1479
1480 start = prange->start << PAGE_SHIFT;
1481 end = (prange->last + 1) << PAGE_SHIFT;
1482 for (addr = start; addr < end && !r; ) {
1483 struct hmm_range *hmm_range;
1484 struct vm_area_struct *vma;
1485 unsigned long next;
1486 unsigned long offset;
1487 unsigned long npages;
1488 bool readonly;
1489
1490 vma = find_vma(mm, addr);
1491 if (!vma || addr < vma->vm_start) {
1492 r = -EFAULT;
1493 goto unreserve_out;
1494 }
1495 readonly = !(vma->vm_flags & VM_WRITE);
1496
1497 next = min(vma->vm_end, end);
1498 npages = (next - addr) >> PAGE_SHIFT;
1499 WRITE_ONCE(p->svms.faulting_task, current);
1500 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
1501 addr, npages, &hmm_range,
1502 readonly, true, owner);
1503 WRITE_ONCE(p->svms.faulting_task, NULL);
1504 if (r) {
1505 pr_debug("failed %d to get svm range pages\n", r);
1506 goto unreserve_out;
1507 }
1508
1509 offset = (addr - start) >> PAGE_SHIFT;
1510 r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
1511 hmm_range->hmm_pfns);
1512 if (r) {
1513 pr_debug("failed %d to dma map range\n", r);
1514 goto unreserve_out;
1515 }
1516
1517 svm_range_lock(prange);
1518 if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1519 pr_debug("hmm update the range, need validate again\n");
1520 r = -EAGAIN;
1521 goto unlock_out;
1522 }
1523 if (!list_empty(&prange->child_list)) {
1524 pr_debug("range split by unmap in parallel, validate again\n");
1525 r = -EAGAIN;
1526 goto unlock_out;
1527 }
1528
1529 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1530 ctx.bitmap, wait);
1531
1532 unlock_out:
1533 svm_range_unlock(prange);
1534
1535 addr = next;
1536 }
1537
1538 if (addr == end)
1539 prange->validated_once = true;
1540
1541 unreserve_out:
1542 svm_range_unreserve_bos(&ctx);
1543
1544 if (!r)
1545 prange->validate_timestamp = ktime_to_us(ktime_get());
1546
1547 return r;
1548 }
1549
1550 /**
1551 * svm_range_list_lock_and_flush_work - flush pending deferred work
1552 *
1553 * @svms: the svm range list
1554 * @mm: the mm structure
1555 *
1556 * Context: Returns with mmap write lock held, pending deferred work flushed
1557 *
1558 */
1559 void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1561 struct mm_struct *mm)
1562 {
1563 retry_flush_work:
1564 flush_work(&svms->deferred_list_work);
1565 mmap_write_lock(mm);
1566
1567 if (list_empty(&svms->deferred_range_list))
1568 return;
1569 mmap_write_unlock(mm);
1570 pr_debug("retry flush\n");
1571 goto retry_flush_work;
1572 }
1573
static void svm_range_restore_work(struct work_struct *work)
1575 {
1576 struct delayed_work *dwork = to_delayed_work(work);
1577 struct svm_range_list *svms;
1578 struct svm_range *prange;
1579 struct kfd_process *p;
1580 struct mm_struct *mm;
1581 int evicted_ranges;
1582 int invalid;
1583 int r;
1584
1585 svms = container_of(dwork, struct svm_range_list, restore_work);
1586 evicted_ranges = atomic_read(&svms->evicted_ranges);
1587 if (!evicted_ranges)
1588 return;
1589
1590 pr_debug("restore svm ranges\n");
1591
1592 /* kfd_process_notifier_release destroys this worker thread. So during
1593 * the lifetime of this thread, kfd_process and mm will be valid.
1594 */
1595 p = container_of(svms, struct kfd_process, svms);
1596 mm = p->mm;
1597 if (!mm)
1598 return;
1599
1600 svm_range_list_lock_and_flush_work(svms, mm);
1601 mutex_lock(&svms->lock);
1602
1603 evicted_ranges = atomic_read(&svms->evicted_ranges);
1604
1605 list_for_each_entry(prange, &svms->list, list) {
1606 invalid = atomic_read(&prange->invalid);
1607 if (!invalid)
1608 continue;
1609
1610 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1611 prange->svms, prange, prange->start, prange->last,
1612 invalid);
1613
/*
 * If the range is migrating, wait for the migration to finish.
 */
1617 mutex_lock(&prange->migrate_mutex);
1618
1619 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1620 false, true);
1621 if (r)
1622 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1623 prange->start);
1624
1625 mutex_unlock(&prange->migrate_mutex);
1626 if (r)
1627 goto out_reschedule;
1628
1629 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1630 goto out_reschedule;
1631 }
1632
1633 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1634 evicted_ranges)
1635 goto out_reschedule;
1636
1637 evicted_ranges = 0;
1638
1639 r = kgd2kfd_resume_mm(mm);
1640 if (r) {
1641 /* No recovery from this failure. Probably the CP is
1642 * hanging. No point trying again.
1643 */
1644 pr_debug("failed %d to resume KFD\n", r);
1645 }
1646
1647 pr_debug("restore svm ranges successfully\n");
1648
1649 out_reschedule:
1650 mutex_unlock(&svms->lock);
1651 mmap_write_unlock(mm);
1652
1653 /* If validation failed, reschedule another attempt */
1654 if (evicted_ranges) {
1655 pr_debug("reschedule to restore svm range\n");
1656 schedule_delayed_work(&svms->restore_work,
1657 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1658 }
1659 }
1660
1661 /**
1662 * svm_range_evict - evict svm range
1663 *
 * Stop all queues of the process to ensure the GPU doesn't access the memory,
 * then return to let the CPU evict the buffer and proceed with the CPU page
 * table update.
 *
 * No lock is needed to synchronize the CPU page table invalidation with GPU
 * execution. If an invalidation happens while the restore work is running, the
 * restore work restarts to pick up the latest CPU page mapping for the GPU and
 * then starts the queues.
1671 */
1672 static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1674 unsigned long start, unsigned long last)
1675 {
1676 struct svm_range_list *svms = prange->svms;
1677 struct svm_range *pchild;
1678 struct kfd_process *p;
1679 int r = 0;
1680
1681 p = container_of(svms, struct kfd_process, svms);
1682
1683 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1684 svms, prange->start, prange->last, start, last);
1685
1686 if (!p->xnack_enabled) {
1687 int evicted_ranges;
1688
1689 list_for_each_entry(pchild, &prange->child_list, child_list) {
1690 mutex_lock_nested(&pchild->lock, 1);
1691 if (pchild->start <= last && pchild->last >= start) {
1692 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1693 pchild->start, pchild->last);
1694 atomic_inc(&pchild->invalid);
1695 }
1696 mutex_unlock(&pchild->lock);
1697 }
1698
1699 if (prange->start <= last && prange->last >= start)
1700 atomic_inc(&prange->invalid);
1701
1702 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1703 if (evicted_ranges != 1)
1704 return r;
1705
1706 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1707 prange->svms, prange->start, prange->last);
1708
1709 /* First eviction, stop the queues */
1710 r = kgd2kfd_quiesce_mm(mm);
1711 if (r)
1712 pr_debug("failed to quiesce KFD\n");
1713
1714 pr_debug("schedule to restore svm %p ranges\n", svms);
1715 schedule_delayed_work(&svms->restore_work,
1716 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1717 } else {
1718 unsigned long s, l;
1719
1720 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1721 prange->svms, start, last);
1722 list_for_each_entry(pchild, &prange->child_list, child_list) {
1723 mutex_lock_nested(&pchild->lock, 1);
1724 s = max(start, pchild->start);
1725 l = min(last, pchild->last);
1726 if (l >= s)
1727 svm_range_unmap_from_gpus(pchild, s, l);
1728 mutex_unlock(&pchild->lock);
1729 }
1730 s = max(start, prange->start);
1731 l = min(last, prange->last);
1732 if (l >= s)
1733 svm_range_unmap_from_gpus(prange, s, l);
1734 }
1735
1736 return r;
1737 }
1738
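/* Duplicate @old into a newly allocated svm_range with the same address
 * range and attributes, taking an extra reference on its svm_bo if present.
 */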
static struct svm_range *svm_range_clone(struct svm_range *old)
1740 {
1741 struct svm_range *new;
1742
1743 new = svm_range_new(old->svms, old->start, old->last);
1744 if (!new)
1745 return NULL;
1746
1747 if (old->svm_bo) {
1748 new->ttm_res = old->ttm_res;
1749 new->offset = old->offset;
1750 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1751 spin_lock(&new->svm_bo->list_lock);
1752 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1753 spin_unlock(&new->svm_bo->list_lock);
1754 }
1755 new->flags = old->flags;
1756 new->preferred_loc = old->preferred_loc;
1757 new->prefetch_loc = old->prefetch_loc;
1758 new->actual_loc = old->actual_loc;
1759 new->granularity = old->granularity;
1760 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1761 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1762
1763 return new;
1764 }
1765
1766 /**
1767 * svm_range_handle_overlap - split overlap ranges
1768 * @svms: svm range list header
1769 * @new: range to be added, carrying the new attributes
1770 * @start: start address of the added range, in pages
1771 * @last: last address of the added range, in pages
1772 * @update_list: output, ranges whose attributes are updated. For set_attr,
1773 * these will be validated and mapped to GPUs. For unmap, these will be
1774 * removed and unmapped from GPUs.
1775 * @insert_list: output, ranges to be inserted into svms; attributes are
1776 * not changed. For set_attr, these will be added into svms.
1777 * @remove_list: output, ranges to be removed from svms
1778 * @left: the remaining range after overlap. For set_attr, this will be added
1779 * as a new range.
1780 *
1781 * There are 5 overlap cases in total.
1782 *
1783 * This function handles overlap of an address interval with existing
1784 * struct svm_ranges for applying new attributes. This may require
1785 * splitting existing struct svm_ranges. All changes should be applied to
1786 * the range_list and interval tree transactionally. If any split operation
1787 * fails, the entire update fails. Therefore the existing overlapping
1788 * svm_ranges are cloned and the original svm_ranges left unchanged. If the
1789 * transaction succeeds, the modified clones are added and the originals
1790 * freed. Otherwise the clones are removed and the old svm_ranges remain.
1791 *
1792 * Context: The caller must hold svms->lock
1793 */
1794 static int
1795 svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
1796 unsigned long start, unsigned long last,
1797 struct list_head *update_list,
1798 struct list_head *insert_list,
1799 struct list_head *remove_list,
1800 unsigned long *left)
1801 {
1802 struct interval_tree_node *node;
1803 struct svm_range *prange;
1804 struct svm_range *tmp;
1805 int r = 0;
1806
1807 INIT_LIST_HEAD(update_list);
1808 INIT_LIST_HEAD(insert_list);
1809 INIT_LIST_HEAD(remove_list);
1810
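/* Walk every existing range overlapping [start, last]. For each node: clone
 * and split it if it extends beyond the new range, update it in place if it
 * is fully contained, and create a brand-new range for any gap before it.
 * A gap left after the last node is reported through @left.
 */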
1811 node = interval_tree_iter_first(&svms->objects, start, last);
1812 while (node) {
1813 struct interval_tree_node *next;
1814 struct svm_range *old;
1815 unsigned long next_start;
1816
1817 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
1818 node->last);
1819
1820 old = container_of(node, struct svm_range, it_node);
1821 next = interval_tree_iter_next(node, start, last);
1822 next_start = min(node->last, last) + 1;
1823
1824 if (node->start < start || node->last > last) {
1825 /* node extends beyond the updated range, clone and split it */
1826 prange = svm_range_clone(old);
1827 if (!prange) {
1828 r = -ENOMEM;
1829 goto out;
1830 }
1831
1832 list_add(&old->remove_list, remove_list);
1833 list_add(&prange->insert_list, insert_list);
1834
1835 if (node->start < start) {
1836 pr_debug("change old range start\n");
1837 r = svm_range_split_head(prange, new, start,
1838 insert_list);
1839 if (r)
1840 goto out;
1841 }
1842 if (node->last > last) {
1843 pr_debug("change old range last\n");
1844 r = svm_range_split_tail(prange, new, last,
1845 insert_list);
1846 if (r)
1847 goto out;
1848 }
1849 } else {
1850 /* The node is contained within start..last,
1851 * just update it
1852 */
1853 prange = old;
1854 }
1855
1856 if (!svm_range_is_same_attrs(prange, new))
1857 list_add(&prange->update_list, update_list);
1858
1859 /* insert a new range to fill the gap before this node, if any */
1860 if (node->start > start) {
1861 prange = svm_range_new(prange->svms, start,
1862 node->start - 1);
1863 if (!prange) {
1864 r = -ENOMEM;
1865 goto out;
1866 }
1867
1868 list_add(&prange->insert_list, insert_list);
1869 list_add(&prange->update_list, update_list);
1870 }
1871
1872 node = next;
1873 start = next_start;
1874 }
1875
1876 if (left && start <= last)
1877 *left = last - start + 1;
1878
1879 out:
1880 if (r)
1881 list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
1882 svm_range_free(prange);
1883
1884 return r;
1885 }
1886
1887 static void
1888 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
1889 struct svm_range *prange)
1890 {
1891 unsigned long start;
1892 unsigned long last;
1893
1894 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
1895 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
1896
1897 if (prange->start == start && prange->last == last)
1898 return;
1899
1900 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1901 prange->svms, prange, start, last, prange->start,
1902 prange->last);
1903
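/* it_node/notifier bounds of [0, 0] mean the range was never inserted yet
 * (e.g. a freshly split child with no notifier), so there is nothing to
 * remove first.
 */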
1904 if (start != 0 && last != 0) {
1905 interval_tree_remove(&prange->it_node, &prange->svms->objects);
1906 svm_range_remove_notifier(prange);
1907 }
1908 prange->it_node.start = prange->start;
1909 prange->it_node.last = prange->last;
1910
1911 interval_tree_insert(&prange->it_node, &prange->svms->objects);
1912 svm_range_add_notifier_locked(mm, prange);
1913 }
1914
1915 static void
1916 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
1917 {
1918 struct mm_struct *mm = prange->work_item.mm;
1919
1920 switch (prange->work_item.op) {
1921 case SVM_OP_NULL:
1922 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1923 svms, prange, prange->start, prange->last);
1924 break;
1925 case SVM_OP_UNMAP_RANGE:
1926 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1927 svms, prange, prange->start, prange->last);
1928 svm_range_unlink(prange);
1929 svm_range_remove_notifier(prange);
1930 svm_range_free(prange);
1931 break;
1932 case SVM_OP_UPDATE_RANGE_NOTIFIER:
1933 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1934 svms, prange, prange->start, prange->last);
1935 svm_range_update_notifier_and_interval_tree(mm, prange);
1936 break;
1937 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
1938 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1939 svms, prange, prange->start, prange->last);
1940 svm_range_update_notifier_and_interval_tree(mm, prange);
1941 /* TODO: implement deferred validation and mapping */
1942 break;
1943 case SVM_OP_ADD_RANGE:
1944 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
1945 prange->start, prange->last);
1946 svm_range_add_to_svms(prange);
1947 svm_range_add_notifier_locked(mm, prange);
1948 break;
1949 case SVM_OP_ADD_RANGE_AND_MAP:
1950 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
1951 prange, prange->start, prange->last);
1952 svm_range_add_to_svms(prange);
1953 svm_range_add_notifier_locked(mm, prange);
1954 /* TODO: implement deferred validation and mapping */
1955 break;
1956 default:
1957 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
1958 prange->work_item.op);
1959 }
1960 }
1961
1962 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
1963 {
1964 struct kfd_process_device *pdd;
1965 struct amdgpu_device *adev;
1966 struct kfd_process *p;
1967 int drain;
1968 uint32_t i;
1969
1970 p = container_of(svms, struct kfd_process, svms);
1971
1972 restart:
1973 drain = atomic_read(&svms->drain_pagefaults);
1974 if (!drain)
1975 return;
1976
1977 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
1978 pdd = p->pdds[i];
1979 if (!pdd)
1980 continue;
1981
1982 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
1983 adev = (struct amdgpu_device *)pdd->dev->kgd;
1984
1985 amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
1986 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
1987 }
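/* If another unmap raced in and bumped drain_pagefaults while we were
 * draining, go around again so its faults are drained too.
 */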
1988 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
1989 goto restart;
1990 }
1991
1992 static void svm_range_deferred_list_work(struct work_struct *work)
1993 {
1994 struct svm_range_list *svms;
1995 struct svm_range *prange;
1996 struct mm_struct *mm;
1997 struct kfd_process *p;
1998
1999 svms = container_of(work, struct svm_range_list, deferred_list_work);
2000 pr_debug("enter svms 0x%p\n", svms);
2001
2002 p = container_of(svms, struct kfd_process, svms);
2003 /* Prevent the mm from going away while inserting the mmu notifier */
2004 mm = get_task_mm(p->lead_thread);
2005 if (!mm) {
2006 pr_debug("svms 0x%p process mm gone\n", svms);
2007 return;
2008 }
2009 retry:
2010 mmap_write_lock(mm);
2011
2012 /* Checking for the need to drain retry faults must be inside
2013 * mmap write lock to serialize with munmap notifiers.
2014 */
2015 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2016 mmap_write_unlock(mm);
2017 svm_range_drain_retry_fault(svms);
2018 goto retry;
2019 }
2020
2021 spin_lock(&svms->deferred_list_lock);
2022 while (!list_empty(&svms->deferred_range_list)) {
2023 prange = list_first_entry(&svms->deferred_range_list,
2024 struct svm_range, deferred_list);
2025 list_del_init(&prange->deferred_list);
2026 spin_unlock(&svms->deferred_list_lock);
2027
2028 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2029 prange->start, prange->last, prange->work_item.op);
2030
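/* Lock order here: mmap write lock, then svms->lock, then the
 * prange->migrate_mutex. Children queued on the prange are handled under the
 * parent's migrate_mutex before the parent itself.
 */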
2031 mutex_lock(&svms->lock);
2032 mutex_lock(&prange->migrate_mutex);
2033 while (!list_empty(&prange->child_list)) {
2034 struct svm_range *pchild;
2035
2036 pchild = list_first_entry(&prange->child_list,
2037 struct svm_range, child_list);
2038 pr_debug("child prange 0x%p op %d\n", pchild,
2039 pchild->work_item.op);
2040 list_del_init(&pchild->child_list);
2041 svm_range_handle_list_op(svms, pchild);
2042 }
2043 mutex_unlock(&prange->migrate_mutex);
2044
2045 svm_range_handle_list_op(svms, prange);
2046 mutex_unlock(&svms->lock);
2047
2048 spin_lock(&svms->deferred_list_lock);
2049 }
2050 spin_unlock(&svms->deferred_list_lock);
2051
2052 mmap_write_unlock(mm);
2053 mmput(mm);
2054 pr_debug("exit svms 0x%p\n", svms);
2055 }
2056
2057 void
2058 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2059 struct mm_struct *mm, enum svm_work_list_ops op)
2060 {
2061 spin_lock(&svms->deferred_list_lock);
2062 /* if prange is on the deferred list */
2063 if (!list_empty(&prange->deferred_list)) {
2064 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2065 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
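/* An already-pending SVM_OP_UNMAP_RANGE is final and must not be overridden;
 * SVM_OP_NULL never replaces an existing op.
 */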
2066 if (op != SVM_OP_NULL &&
2067 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2068 prange->work_item.op = op;
2069 } else {
2070 prange->work_item.op = op;
2071 prange->work_item.mm = mm;
2072 list_add_tail(&prange->deferred_list,
2073 &prange->svms->deferred_range_list);
2074 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2075 prange, prange->start, prange->last, op);
2076 }
2077 spin_unlock(&svms->deferred_list_lock);
2078 }
2079
2080 void schedule_deferred_list_work(struct svm_range_list *svms)
2081 {
2082 spin_lock(&svms->deferred_list_lock);
2083 if (!list_empty(&svms->deferred_range_list))
2084 schedule_work(&svms->deferred_list_work);
2085 spin_unlock(&svms->deferred_list_lock);
2086 }
2087
2088 static void
2089 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2090 struct svm_range *prange, unsigned long start,
2091 unsigned long last)
2092 {
2093 struct svm_range *head;
2094 struct svm_range *tail;
2095
2096 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2097 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2098 prange->start, prange->last);
2099 return;
2100 }
2101 if (start > prange->last || last < prange->start)
2102 return;
2103
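/* Split so that the piece covering [start, last] can be unmapped on its own:
 * prange keeps what precedes start, tail keeps what follows last, and the
 * remaining piece becomes an SVM_OP_UNMAP_RANGE child of parent. When both a
 * leading and a trailing piece survive, the trailing piece is a new range and
 * is re-registered via SVM_OP_ADD_RANGE.
 */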
2104 head = tail = prange;
2105 if (start > prange->start)
2106 svm_range_split(prange, prange->start, start - 1, &tail);
2107 if (last < tail->last)
2108 svm_range_split(tail, last + 1, tail->last, &head);
2109
2110 if (head != prange && tail != prange) {
2111 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2112 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2113 } else if (tail != prange) {
2114 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2115 } else if (head != prange) {
2116 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2117 } else if (parent != prange) {
2118 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2119 }
2120 }
2121
2122 static void
2123 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2124 unsigned long start, unsigned long last)
2125 {
2126 struct svm_range_list *svms;
2127 struct svm_range *pchild;
2128 struct kfd_process *p;
2129 unsigned long s, l;
2130 bool unmap_parent;
2131
2132 p = kfd_lookup_process_by_mm(mm);
2133 if (!p)
2134 return;
2135 svms = &p->svms;
2136
2137 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2138 prange, prange->start, prange->last, start, last);
2139
2140 /* Make sure pending page faults are drained in the deferred worker
2141 * before the range is freed to avoid straggler interrupts on
2142 * unmapped memory causing "phantom faults".
2143 */
2144 atomic_inc(&svms->drain_pagefaults);
2145
2146 unmap_parent = start <= prange->start && last >= prange->last;
2147
2148 list_for_each_entry(pchild, &prange->child_list, child_list) {
2149 mutex_lock_nested(&pchild->lock, 1);
2150 s = max(start, pchild->start);
2151 l = min(last, pchild->last);
2152 if (l >= s)
2153 svm_range_unmap_from_gpus(pchild, s, l);
2154 svm_range_unmap_split(mm, prange, pchild, start, last);
2155 mutex_unlock(&pchild->lock);
2156 }
2157 s = max(start, prange->start);
2158 l = min(last, prange->last);
2159 if (l >= s)
2160 svm_range_unmap_from_gpus(prange, s, l);
2161 svm_range_unmap_split(mm, prange, prange, start, last);
2162
2163 if (unmap_parent)
2164 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2165 else
2166 svm_range_add_list_work(svms, prange, mm,
2167 SVM_OP_UPDATE_RANGE_NOTIFIER);
2168 schedule_deferred_list_work(svms);
2169
2170 kfd_unref_process(p);
2171 }
2172
2173 /**
2174 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2175 *
2176 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
2177 * otherwise it comes from migration or a CPU page invalidation callback.
2178 *
2179 * For an unmap event, unmap the range from GPUs, remove the prange from svms
2180 * in a delayed work thread, and split the prange if only part of it is unmapped.
2181 *
2182 * For an invalidation event, if GPU retry fault is not enabled, evict the queues,
2183 * then schedule svm_range_restore_work to update the GPU mapping and resume the
2184 * queues. If GPU retry fault is enabled, unmap the svm range from the GPU; the
2185 * retry fault will update the GPU mapping to recover.
2186 *
2187 * Context: mmap lock and notifier_invalidate_start lock are held for an
2188 * invalidate event; the prange lock is held if this is from migration
2189 */
2190 static bool
2191 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2192 const struct mmu_notifier_range *range,
2193 unsigned long cur_seq)
2194 {
2195 struct svm_range *prange;
2196 unsigned long start;
2197 unsigned long last;
2198
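/* MMU_NOTIFY_RELEASE fires at process teardown; the SVM ranges are cleaned up
 * on teardown (see svm_range_list_fini), so there is nothing to invalidate here.
 */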
2199 if (range->event == MMU_NOTIFY_RELEASE)
2200 return true;
2201
2202 start = mni->interval_tree.start;
2203 last = mni->interval_tree.last;
2204 start = (start > range->start ? start : range->start) >> PAGE_SHIFT;
2205 last = (last < (range->end - 1) ? last : range->end - 1) >> PAGE_SHIFT;
2206 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2207 start, last, range->start >> PAGE_SHIFT,
2208 (range->end - 1) >> PAGE_SHIFT,
2209 mni->interval_tree.start >> PAGE_SHIFT,
2210 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2211
2212 prange = container_of(mni, struct svm_range, notifier);
2213
2214 svm_range_lock(prange);
2215 mmu_interval_set_seq(mni, cur_seq);
2216
2217 switch (range->event) {
2218 case MMU_NOTIFY_UNMAP:
2219 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2220 break;
2221 default:
2222 svm_range_evict(prange, mni->mm, start, last);
2223 break;
2224 }
2225
2226 svm_range_unlock(prange);
2227
2228 return true;
2229 }
2230
2231 /**
2232 * svm_range_from_addr - find svm range from fault address
2233 * @svms: svm range list header
2234 * @addr: address to search range interval tree, in pages
2235 * @parent: parent range if range is on child list
2236 *
2237 * Context: The caller must hold svms->lock
2238 *
2239 * Return: the svm_range found or NULL
2240 */
2241 struct svm_range *
2242 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2243 struct svm_range **parent)
2244 {
2245 struct interval_tree_node *node;
2246 struct svm_range *prange;
2247 struct svm_range *pchild;
2248
2249 node = interval_tree_iter_first(&svms->objects, addr, addr);
2250 if (!node)
2251 return NULL;
2252
2253 prange = container_of(node, struct svm_range, it_node);
2254 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2255 addr, prange->start, prange->last, node->start, node->last);
2256
2257 if (addr >= prange->start && addr <= prange->last) {
2258 if (parent)
2259 *parent = prange;
2260 return prange;
2261 }
2262 list_for_each_entry(pchild, &prange->child_list, child_list)
2263 if (addr >= pchild->start && addr <= pchild->last) {
2264 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2265 addr, pchild->start, pchild->last);
2266 if (parent)
2267 *parent = prange;
2268 return pchild;
2269 }
2270
2271 return NULL;
2272 }
2273
2274 /* svm_range_best_restore_location - decide the best fault restore location
2275 * @prange: svm range structure
2276 * @adev: the GPU on which vm fault happened
2277 *
2278 * This is only called when xnack is on, to decide the best location to restore
2279 * the range mapping after a GPU vm fault. The caller uses the best location to
2280 * migrate the range if the actual location is not the best location, then
2281 * updates the GPU page table mapping to the best location.
2282 *
2283 * If the preferred location is accessible by the faulting GPU, use it.
2284 * If the faulting GPU is set in the range's ACCESSIBLE bitmap, best_loc is the
2285 * faulting GPU. If the faulting GPU is set in the ACCESSIBLE_IN_PLACE bitmap:
2286 * if the range's actual location is CPU, best_loc is CPU;
2287 * if the faulting GPU is on the same XGMI hive as the actual location GPU,
2288 * best_loc is the range's actual location.
2289 * Otherwise the GPU has no access and best_loc is -1.
2290 *
2291 * Return:
2292 * -1 if the faulting GPU has no access to the range
2293 * 0 for CPU, or the GPU id otherwise
2294 */
2295 static int32_t
2296 svm_range_best_restore_location(struct svm_range *prange,
2297 struct amdgpu_device *adev,
2298 int32_t *gpuidx)
2299 {
2300 struct amdgpu_device *bo_adev, *preferred_adev;
2301 struct kfd_process *p;
2302 uint32_t gpuid;
2303 int r;
2304
2305 p = container_of(prange->svms, struct kfd_process, svms);
2306
2307 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
2308 if (r < 0) {
2309 pr_debug("failed to get gpuid from kgd\n");
2310 return -1;
2311 }
2312
2313 if (prange->preferred_loc == gpuid ||
2314 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2315 return prange->preferred_loc;
2316 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2317 preferred_adev = svm_range_get_adev_by_id(prange,
2318 prange->preferred_loc);
2319 if (amdgpu_xgmi_same_hive(adev, preferred_adev))
2320 return prange->preferred_loc;
2321 /* fall through */
2322 }
2323
2324 if (test_bit(*gpuidx, prange->bitmap_access))
2325 return gpuid;
2326
2327 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2328 if (!prange->actual_loc)
2329 return 0;
2330
2331 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2332 if (amdgpu_xgmi_same_hive(adev, bo_adev))
2333 return prange->actual_loc;
2334 else
2335 return 0;
2336 }
2337
2338 return -1;
2339 }
2340
2341 static int
2342 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2343 unsigned long *start, unsigned long *last,
2344 bool *is_heap_stack)
2345 {
2346 struct vm_area_struct *vma;
2347 struct interval_tree_node *node;
2348 unsigned long start_limit, end_limit;
2349
2350 vma = find_vma(p->mm, addr << PAGE_SHIFT);
2351 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2352 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2353 return -EFAULT;
2354 }
2355
2356 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2357 vma->vm_end >= vma->vm_mm->start_brk) ||
2358 (vma->vm_start <= vma->vm_mm->start_stack &&
2359 vma->vm_end >= vma->vm_mm->start_stack);
2360
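/* Clamp to 512-page (2UL << 8, i.e. 2MB) granularity around the fault address,
 * bounded by the VMA. For example, a fault at page 0x12345 gives start_limit
 * 0x12200 and end_limit 0x12400; neighbouring registered ranges clamp the
 * limits further below.
 */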
2361 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2362 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2363 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2364 (unsigned long)ALIGN(addr + 1, 2UL << 8));
2365 /* First range that starts after the fault address */
2366 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2367 if (node) {
2368 end_limit = min(end_limit, node->start);
2369 /* Last range that ends before the fault address */
2370 node = container_of(rb_prev(&node->rb),
2371 struct interval_tree_node, rb);
2372 } else {
2373 /* Last range must end before addr because
2374 * there was no range after addr
2375 */
2376 node = container_of(rb_last(&p->svms.objects.rb_root),
2377 struct interval_tree_node, rb);
2378 }
2379 if (node) {
2380 if (node->last >= addr) {
2381 WARN(1, "Overlap with prev node and page fault addr\n");
2382 return -EFAULT;
2383 }
2384 start_limit = max(start_limit, node->last + 1);
2385 }
2386
2387 *start = start_limit;
2388 *last = end_limit - 1;
2389
2390 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2391 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2392 *start, *last, *is_heap_stack);
2393
2394 return 0;
2395 }
2396
2397 static int
2398 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2399 uint64_t *bo_s, uint64_t *bo_l)
2400 {
2401 struct amdgpu_bo_va_mapping *mapping;
2402 struct interval_tree_node *node;
2403 struct amdgpu_bo *bo = NULL;
2404 unsigned long userptr;
2405 uint32_t i;
2406 int r;
2407
2408 for (i = 0; i < p->n_pdds; i++) {
2409 struct amdgpu_vm *vm;
2410
2411 if (!p->pdds[i]->drm_priv)
2412 continue;
2413
2414 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2415 r = amdgpu_bo_reserve(vm->root.bo, false);
2416 if (r)
2417 return r;
2418
2419 /* Check userptr by searching entire vm->va interval tree */
2420 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2421 while (node) {
2422 mapping = container_of((struct rb_node *)node,
2423 struct amdgpu_bo_va_mapping, rb);
2424 bo = mapping->bo_va->base.bo;
2425
2426 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2427 start << PAGE_SHIFT,
2428 last << PAGE_SHIFT,
2429 &userptr)) {
2430 node = interval_tree_iter_next(node, 0, ~0ULL);
2431 continue;
2432 }
2433
2434 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2435 start, last);
2436 if (bo_s && bo_l) {
2437 *bo_s = userptr >> PAGE_SHIFT;
2438 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2439 }
2440 amdgpu_bo_unreserve(vm->root.bo);
2441 return -EADDRINUSE;
2442 }
2443 amdgpu_bo_unreserve(vm->root.bo);
2444 }
2445 return 0;
2446 }
2447
2448 static struct
2449 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2450 struct kfd_process *p,
2451 struct mm_struct *mm,
2452 int64_t addr)
2453 {
2454 struct svm_range *prange = NULL;
2455 unsigned long start, last;
2456 uint32_t gpuid, gpuidx;
2457 bool is_heap_stack;
2458 uint64_t bo_s = 0;
2459 uint64_t bo_l = 0;
2460 int r;
2461
2462 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2463 &is_heap_stack))
2464 return NULL;
2465
2466 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2467 if (r != -EADDRINUSE)
2468 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2469
2470 if (r == -EADDRINUSE) {
2471 if (addr >= bo_s && addr <= bo_l)
2472 return NULL;
2473
2474 /* Create a one-page svm range if the 2MB-aligned range overlaps an existing mapping */
2475 start = addr;
2476 last = addr;
2477 }
2478
2479 prange = svm_range_new(&p->svms, start, last);
2480 if (!prange) {
2481 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2482 return NULL;
2483 }
2484 if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
2485 pr_debug("failed to get gpuid from kgd\n");
2486 svm_range_free(prange);
2487 return NULL;
2488 }
2489
2490 if (is_heap_stack)
2491 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2492
2493 svm_range_add_to_svms(prange);
2494 svm_range_add_notifier_locked(mm, prange);
2495
2496 return prange;
2497 }
2498
2499 /* svm_range_skip_recover - decide if prange can be recovered
2500 * @prange: svm range structure
2501 *
2502 * The GPU vm retry fault handler skips recovering the range in these cases:
2503 * 1. prange is on the deferred list to be removed after unmap; the fault is
2504 * stale and the deferred list work drains stale faults before freeing the prange.
2505 * 2. prange is on the deferred list to add an interval notifier after a split, or
2506 * 3. prange is a child range split from a parent prange; it is recovered later,
2507 * after the interval notifier is added.
2508 *
2509 * Return: true to skip recover, false to recover
2510 */
2511 static bool svm_range_skip_recover(struct svm_range *prange)
2512 {
2513 struct svm_range_list *svms = prange->svms;
2514
2515 spin_lock(&svms->deferred_list_lock);
2516 if (list_empty(&prange->deferred_list) &&
2517 list_empty(&prange->child_list)) {
2518 spin_unlock(&svms->deferred_list_lock);
2519 return false;
2520 }
2521 spin_unlock(&svms->deferred_list_lock);
2522
2523 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2524 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2525 svms, prange, prange->start, prange->last);
2526 return true;
2527 }
2528 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2529 prange->work_item.op == SVM_OP_ADD_RANGE) {
2530 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2531 svms, prange, prange->start, prange->last);
2532 return true;
2533 }
2534 return false;
2535 }
2536
2537 static void
2538 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2539 int32_t gpuidx)
2540 {
2541 struct kfd_process_device *pdd;
2542
2543 /* fault is on different page of same range
2544 * or fault is skipped to recover later
2545 * or fault is on invalid virtual address
2546 */
2547 if (gpuidx == MAX_GPU_INSTANCE) {
2548 uint32_t gpuid;
2549 int r;
2550
2551 r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
2552 if (r < 0)
2553 return;
2554 }
2555
2556 /* fault is recovered
2557 * or fault cannot be recovered because the GPU has no access to the range
2558 */
2559 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2560 if (pdd)
2561 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2562 }
2563
2564 static bool
2565 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2566 {
2567 unsigned long requested = VM_READ;
2568
2569 if (write_fault)
2570 requested |= VM_WRITE;
2571
2572 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2573 vma->vm_flags);
2574 return (vma->vm_flags & requested) == requested;
2575 }
2576
2577 int
2578 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2579 uint64_t addr, bool write_fault)
2580 {
2581 struct mm_struct *mm = NULL;
2582 struct svm_range_list *svms;
2583 struct svm_range *prange;
2584 struct kfd_process *p;
2585 uint64_t timestamp;
2586 int32_t best_loc;
2587 int32_t gpuidx = MAX_GPU_INSTANCE;
2588 bool write_locked = false;
2589 struct vm_area_struct *vma;
2590 int r = 0;
2591
2592 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2593 pr_debug("device does not support SVM\n");
2594 return -EFAULT;
2595 }
2596
2597 p = kfd_lookup_process_by_pasid(pasid);
2598 if (!p) {
2599 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2600 return 0;
2601 }
2602 if (!p->xnack_enabled) {
2603 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2604 r = -EFAULT;
2605 goto out;
2606 }
2607 svms = &p->svms;
2608
2609 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2610
2611 if (atomic_read(&svms->drain_pagefaults)) {
2612 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2613 r = 0;
2614 goto out;
2615 }
2616
2617 /* p->lead_thread is available because kfd_process_wq_release flushes the
2618 * work before releasing the task ref.
2619 */
2620 mm = get_task_mm(p->lead_thread);
2621 if (!mm) {
2622 pr_debug("svms 0x%p failed to get mm\n", svms);
2623 r = 0;
2624 goto out;
2625 }
2626
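/* Take the mmap lock for reading in the common case; upgrade to the write
 * lock only if no prange covers the fault address and a new unregistered
 * range has to be created under the MMU notifier.
 */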
2627 mmap_read_lock(mm);
2628 retry_write_locked:
2629 mutex_lock(&svms->lock);
2630 prange = svm_range_from_addr(svms, addr, NULL);
2631 if (!prange) {
2632 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2633 svms, addr);
2634 if (!write_locked) {
2635 /* Need the write lock to create new range with MMU notifier.
2636 * Also flush pending deferred work to make sure the interval
2637 * tree is up to date before we add a new range
2638 */
2639 mutex_unlock(&svms->lock);
2640 mmap_read_unlock(mm);
2641 mmap_write_lock(mm);
2642 write_locked = true;
2643 goto retry_write_locked;
2644 }
2645 prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2646 if (!prange) {
2647 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2648 svms, addr);
2649 mmap_write_downgrade(mm);
2650 r = -EFAULT;
2651 goto out_unlock_svms;
2652 }
2653 }
2654 if (write_locked)
2655 mmap_write_downgrade(mm);
2656
2657 mutex_lock(&prange->migrate_mutex);
2658
2659 if (svm_range_skip_recover(prange)) {
2660 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2661 r = 0;
2662 goto out_unlock_range;
2663 }
2664
2665 timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2666 /* skip duplicate vm fault on different pages of same range */
2667 if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2668 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2669 svms, prange->start, prange->last);
2670 r = 0;
2671 goto out_unlock_range;
2672 }
2673
2674 /* __do_munmap removed VMA, return success as we are handling stale
2675 * retry fault.
2676 */
2677 vma = find_vma(mm, addr << PAGE_SHIFT);
2678 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2679 pr_debug("address 0x%llx VMA is removed\n", addr);
2680 r = 0;
2681 goto out_unlock_range;
2682 }
2683
2684 if (!svm_fault_allowed(vma, write_fault)) {
2685 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2686 write_fault ? "write" : "read");
2687 r = -EPERM;
2688 goto out_unlock_range;
2689 }
2690
2691 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2692 if (best_loc == -1) {
2693 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2694 svms, prange->start, prange->last);
2695 r = -EACCES;
2696 goto out_unlock_range;
2697 }
2698
2699 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2700 svms, prange->start, prange->last, best_loc,
2701 prange->actual_loc);
2702
2703 if (prange->actual_loc != best_loc) {
2704 if (best_loc) {
2705 r = svm_migrate_to_vram(prange, best_loc, mm);
2706 if (r) {
2707 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2708 r, addr);
2709 /* Fallback to system memory if migration to
2710 * VRAM failed
2711 */
2712 if (prange->actual_loc)
2713 r = svm_migrate_vram_to_ram(prange, mm);
2714 else
2715 r = 0;
2716 }
2717 } else {
2718 r = svm_migrate_vram_to_ram(prange, mm);
2719 }
2720 if (r) {
2721 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2722 r, svms, prange->start, prange->last);
2723 goto out_unlock_range;
2724 }
2725 }
2726
2727 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
2728 if (r)
2729 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2730 r, svms, prange->start, prange->last);
2731
2732 out_unlock_range:
2733 mutex_unlock(&prange->migrate_mutex);
2734 out_unlock_svms:
2735 mutex_unlock(&svms->lock);
2736 mmap_read_unlock(mm);
2737
2738 svm_range_count_fault(adev, p, gpuidx);
2739
2740 mmput(mm);
2741 out:
2742 kfd_unref_process(p);
2743
2744 if (r == -EAGAIN) {
2745 pr_debug("recover vm fault later\n");
2746 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2747 r = 0;
2748 }
2749 return r;
2750 }
2751
2752 void svm_range_list_fini(struct kfd_process *p)
2753 {
2754 struct svm_range *prange;
2755 struct svm_range *next;
2756
2757 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2758
2759 /* Ensure list work is finished before process is destroyed */
2760 flush_work(&p->svms.deferred_list_work);
2761
2762 /*
2763 * Ensure no retry fault comes in afterwards, as the page fault handler would
2764 * not find the kfd process and could not take the mm lock to recover the fault.
2765 */
2766 atomic_inc(&p->svms.drain_pagefaults);
2767 svm_range_drain_retry_fault(&p->svms);
2768
2769
2770 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2771 svm_range_unlink(prange);
2772 svm_range_remove_notifier(prange);
2773 svm_range_free(prange);
2774 }
2775
2776 mutex_destroy(&p->svms.lock);
2777
2778 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2779 }
2780
2781 int svm_range_list_init(struct kfd_process *p)
2782 {
2783 struct svm_range_list *svms = &p->svms;
2784 int i;
2785
2786 svms->objects = RB_ROOT_CACHED;
2787 mutex_init(&svms->lock);
2788 INIT_LIST_HEAD(&svms->list);
2789 atomic_set(&svms->evicted_ranges, 0);
2790 atomic_set(&svms->drain_pagefaults, 0);
2791 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2792 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2793 INIT_LIST_HEAD(&svms->deferred_range_list);
2794 spin_lock_init(&svms->deferred_list_lock);
2795
2796 for (i = 0; i < p->n_pdds; i++)
2797 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2798 bitmap_set(svms->bitmap_supported, i, 1);
2799
2800 return 0;
2801 }
2802
2803 /**
2804 * svm_range_check_vm - check if virtual address range mapped already
2805 * @p: current kfd_process
2806 * @start: range start address, in pages
2807 * @last: range last address, in pages
2808 * @bo_s: mapping start address in pages if address range already mapped
2809 * @bo_l: mapping last address in pages if address range already mapped
2810 *
2811 * The purpose is to avoid collisions with virtual address ranges already
2812 * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
2813 * It checks each pdd in the kfd_process.
2814 *
2815 * Context: Process context
2816 *
2817 * Return 0 - OK, if the range is not mapped.
2818 * Otherwise error code:
2819 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
2820 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
2821 * a signal. Release all buffer reservations and return to user-space.
2822 */
2823 static int
2824 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
2825 uint64_t *bo_s, uint64_t *bo_l)
2826 {
2827 struct amdgpu_bo_va_mapping *mapping;
2828 struct interval_tree_node *node;
2829 uint32_t i;
2830 int r;
2831
2832 for (i = 0; i < p->n_pdds; i++) {
2833 struct amdgpu_vm *vm;
2834
2835 if (!p->pdds[i]->drm_priv)
2836 continue;
2837
2838 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2839 r = amdgpu_bo_reserve(vm->root.bo, false);
2840 if (r)
2841 return r;
2842
2843 node = interval_tree_iter_first(&vm->va, start, last);
2844 if (node) {
2845 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
2846 start, last);
2847 mapping = container_of((struct rb_node *)node,
2848 struct amdgpu_bo_va_mapping, rb);
2849 if (bo_s && bo_l) {
2850 *bo_s = mapping->start;
2851 *bo_l = mapping->last;
2852 }
2853 amdgpu_bo_unreserve(vm->root.bo);
2854 return -EADDRINUSE;
2855 }
2856 amdgpu_bo_unreserve(vm->root.bo);
2857 }
2858
2859 return 0;
2860 }
2861
2862 /**
2863 * svm_range_is_valid - check if virtual address range is valid
2864 * @p: current kfd_process
2865 * @start: range start address, in pages
2866 * @size: range size, in pages
2867 *
2868 * A valid virtual address range is one that is fully covered by one or more VMAs
2869 *
2870 * Context: Process context
2871 *
2872 * Return:
2873 * 0 - OK, otherwise error code
2874 */
2875 static int
2876 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
2877 {
2878 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2879 struct vm_area_struct *vma;
2880 unsigned long end;
2881 unsigned long start_unchg = start;
2882
2883 start <<= PAGE_SHIFT;
2884 end = start + (size << PAGE_SHIFT);
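/* Walk the VMAs covering [start, end); any hole or any device VMA
 * (VM_IO/VM_PFNMAP/VM_MIXEDMAP) makes the range invalid for SVM.
 */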
2885 do {
2886 vma = find_vma(p->mm, start);
2887 if (!vma || start < vma->vm_start ||
2888 (vma->vm_flags & device_vma))
2889 return -EFAULT;
2890 start = min(end, vma->vm_end);
2891 } while (start < end);
2892
2893 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
2894 NULL);
2895 }
2896
2897 /**
2898 * svm_range_add - add svm range and handle overlap
2899 * @p: the process to whose svms the range is added
2900 * @start: page size aligned
2901 * @size: page size aligned
2902 * @nattr: number of attributes
2903 * @attrs: array of attributes
2904 * @update_list: output, the ranges need validate and update GPU mapping
2905 * @insert_list: output, the ranges need insert to svms
2906 * @remove_list: output, the ranges are replaced and need remove from svms
2907 *
2908 * Check if the virtual address range overlaps with registered ranges; split
2909 * the overlapped ranges, and copy and adjust page addresses and vram nodes in
2910 * the old and new ranges.
2911 *
2912 * Context: Process context, caller must hold svms->lock
2913 *
2914 * Return:
2915 * 0 - OK, otherwise error code
2916 */
2917 static int
2918 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2919 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2920 struct list_head *update_list, struct list_head *insert_list,
2921 struct list_head *remove_list)
2922 {
2923 uint64_t last = start + size - 1UL;
2924 struct svm_range_list *svms;
2925 struct svm_range new = {0};
2926 struct svm_range *prange;
2927 unsigned long left = 0;
2928 int r = 0;
2929
2930 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
2931
2932 svm_range_apply_attrs(p, &new, nattr, attrs);
2933
2934 svms = &p->svms;
2935
2936 r = svm_range_handle_overlap(svms, &new, start, last, update_list,
2937 insert_list, remove_list, &left);
2938 if (r)
2939 return r;
2940
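/* Any tail of [start, last] not covered by existing ranges becomes a new
 * range that must be both inserted and validated/mapped.
 */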
2941 if (left) {
2942 prange = svm_range_new(svms, last - left + 1, last);
2943 list_add(&prange->insert_list, insert_list);
2944 list_add(&prange->update_list, update_list);
2945 }
2946
2947 return 0;
2948 }
2949
2950 /**
2951 * svm_range_best_prefetch_location - decide the best prefetch location
2952 * @prange: svm range structure
2953 *
2954 * For xnack off:
2955 * If the range maps to a single GPU, the best prefetch location is
2956 * prefetch_loc, which can be CPU or GPU.
2957 *
2958 * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best prefetch
2959 * location is the prefetch_loc GPU only if all those GPUs are connected on the
2960 * same XGMI hive; otherwise it is always CPU, because a GPU cannot have a
2961 * coherent mapping of another GPU's VRAM even with a large-BAR PCIe connection.
2962 *
2963 * For xnack on:
2964 * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch
2965 * location is prefetch_loc; access from another GPU generates a vm fault and
2966 * triggers migration.
2967 *
2968 * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
2969 * is the prefetch_loc GPU only if all those GPUs are on the same XGMI hive; otherwise it is always CPU.
2970 *
2971 * Context: Process context
2972 *
2973 * Return:
2974 * 0 for CPU or GPU id
2975 */
2976 static uint32_t
2977 svm_range_best_prefetch_location(struct svm_range *prange)
2978 {
2979 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
2980 uint32_t best_loc = prange->prefetch_loc;
2981 struct kfd_process_device *pdd;
2982 struct amdgpu_device *bo_adev;
2983 struct amdgpu_device *adev;
2984 struct kfd_process *p;
2985 uint32_t gpuidx;
2986
2987 p = container_of(prange->svms, struct kfd_process, svms);
2988
2989 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
2990 goto out;
2991
2992 bo_adev = svm_range_get_adev_by_id(prange, best_loc);
2993 if (!bo_adev) {
2994 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
2995 best_loc = 0;
2996 goto out;
2997 }
2998
2999 if (p->xnack_enabled)
3000 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3001 else
3002 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3003 MAX_GPU_INSTANCE);
3004
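/* If any GPU that needs access to the range is not on the same XGMI hive as
 * the prefetch target, fall back to CPU (best_loc = 0).
 */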
3005 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3006 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3007 if (!pdd) {
3008 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3009 continue;
3010 }
3011 adev = (struct amdgpu_device *)pdd->dev->kgd;
3012
3013 if (adev == bo_adev)
3014 continue;
3015
3016 if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
3017 best_loc = 0;
3018 break;
3019 }
3020 }
3021
3022 out:
3023 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3024 p->xnack_enabled, &p->svms, prange->start, prange->last,
3025 best_loc);
3026
3027 return best_loc;
3028 }
3029
3030 /* FIXME: This is a workaround for page locking bug when some pages are
3031 * invalid during migration to VRAM
3032 */
3033 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
3034 void *owner)
3035 {
3036 struct hmm_range *hmm_range;
3037 int r;
3038
3039 if (prange->validated_once)
3040 return;
3041
3042 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
3043 prange->start << PAGE_SHIFT,
3044 prange->npages, &hmm_range,
3045 false, true, owner);
3046 if (!r) {
3047 amdgpu_hmm_range_get_pages_done(hmm_range);
3048 prange->validated_once = true;
3049 }
3050 }
3051
3052 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3053 * @mm: current process mm_struct
3054 * @prange: svm range structure
3055 * @migrated: output, true if migration is triggered
3056 *
3057 * If the range's prefetch_loc is a GPU and the actual location is CPU (0),
3058 * migrate the range from ram to vram.
3059 * If the range's prefetch_loc is CPU (0) and the actual location is a GPU,
3060 * migrate the range from vram to ram.
3061 *
3062 * If GPU vm fault retry is not enabled, migration interacts with the MMU
3063 * notifier and the restore work:
3064 * 1. migrate_vma_setup invalidates pages; the MMU notifier callback
3065 * svm_range_evict stops all queues and schedules the restore work
3066 * 2. svm_range_restore_work waits for the migration to finish via
3067 * a. svm_range_validate_vram taking prange->migrate_mutex
3068 * b. svm_range_validate_ram's HMM get pages waiting for the CPU fault handler to return
3069 * 3. the restore work updates the GPU mappings and resumes all queues.
3070 *
3071 * Context: Process context
3072 *
3073 * Return:
3074 * 0 - OK, otherwise - error code of migration
3075 */
3076 static int
3077 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3078 bool *migrated)
3079 {
3080 uint32_t best_loc;
3081 int r = 0;
3082
3083 *migrated = false;
3084 best_loc = svm_range_best_prefetch_location(prange);
3085
3086 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3087 best_loc == prange->actual_loc)
3088 return 0;
3089
3090 if (!best_loc) {
3091 r = svm_migrate_vram_to_ram(prange, mm);
3092 *migrated = !r;
3093 return r;
3094 }
3095
3096 r = svm_migrate_to_vram(prange, best_loc, mm);
3097 *migrated = !r;
3098
3099 return r;
3100 }
3101
3102 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3103 {
3104 if (!fence)
3105 return -EINVAL;
3106
3107 if (dma_fence_is_signaled(&fence->base))
3108 return 0;
3109
3110 if (fence->svm_bo) {
3111 WRITE_ONCE(fence->svm_bo->evicting, 1);
3112 schedule_work(&fence->svm_bo->eviction_work);
3113 }
3114
3115 return 0;
3116 }
3117
3118 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3119 {
3120 struct svm_range_bo *svm_bo;
3121 struct kfd_process *p;
3122 struct mm_struct *mm;
3123
3124 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3125 if (!svm_bo_ref_unless_zero(svm_bo))
3126 return; /* svm_bo was freed while eviction was pending */
3127
3128 /* svm_range_bo_release destroys this worker thread. So during
3129 * the lifetime of this thread, kfd_process and mm will be valid.
3130 */
3131 p = container_of(svm_bo->svms, struct kfd_process, svms);
3132 mm = p->mm;
3133 if (!mm)
3134 return;
3135
3136 mmap_read_lock(mm);
3137 spin_lock(&svm_bo->list_lock);
3138 while (!list_empty(&svm_bo->range_list)) {
3139 struct svm_range *prange =
3140 list_first_entry(&svm_bo->range_list,
3141 struct svm_range, svm_bo_list);
3142 int retries = 3;
3143
3144 list_del_init(&prange->svm_bo_list);
3145 spin_unlock(&svm_bo->list_lock);
3146
3147 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3148 prange->start, prange->last);
3149
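/* Retry the VRAM-to-RAM migration a few times; a pass can leave actual_loc
 * set if, for example, some pages could not be migrated.
 */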
3150 mutex_lock(&prange->migrate_mutex);
3151 do {
3152 svm_migrate_vram_to_ram(prange,
3153 svm_bo->eviction_fence->mm);
3154 } while (prange->actual_loc && --retries);
3155 WARN(prange->actual_loc, "Migration failed during eviction");
3156
3157 mutex_lock(&prange->lock);
3158 prange->svm_bo = NULL;
3159 mutex_unlock(&prange->lock);
3160
3161 mutex_unlock(&prange->migrate_mutex);
3162
3163 spin_lock(&svm_bo->list_lock);
3164 }
3165 spin_unlock(&svm_bo->list_lock);
3166 mmap_read_unlock(mm);
3167
3168 dma_fence_signal(&svm_bo->eviction_fence->base);
3169 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3170 * has been called in svm_migrate_vram_to_ram
3171 */
3172 WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3173 svm_range_bo_unref(svm_bo);
3174 }
3175
3176 static int
3177 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3178 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3179 {
3180 struct mm_struct *mm = current->mm;
3181 struct list_head update_list;
3182 struct list_head insert_list;
3183 struct list_head remove_list;
3184 struct svm_range_list *svms;
3185 struct svm_range *prange;
3186 struct svm_range *next;
3187 int r = 0;
3188
3189 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3190 p->pasid, &p->svms, start, start + size - 1, size);
3191
3192 r = svm_range_check_attr(p, nattr, attrs);
3193 if (r)
3194 return r;
3195
3196 svms = &p->svms;
3197
3198 svm_range_list_lock_and_flush_work(svms, mm);
3199
3200 r = svm_range_is_valid(p, start, size);
3201 if (r) {
3202 pr_debug("invalid range r=%d\n", r);
3203 mmap_write_unlock(mm);
3204 goto out;
3205 }
3206
3207 mutex_lock(&svms->lock);
3208
3209 /* Add new range and split existing ranges as needed */
3210 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3211 &insert_list, &remove_list);
3212 if (r) {
3213 mutex_unlock(&svms->lock);
3214 mmap_write_unlock(mm);
3215 goto out;
3216 }
3217 /* Apply changes as a transaction */
3218 list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
3219 svm_range_add_to_svms(prange);
3220 svm_range_add_notifier_locked(mm, prange);
3221 }
3222 list_for_each_entry(prange, &update_list, update_list) {
3223 svm_range_apply_attrs(p, prange, nattr, attrs);
3224 /* TODO: unmap ranges from GPU that lost access */
3225 }
3226 list_for_each_entry_safe(prange, next, &remove_list,
3227 remove_list) {
3228 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3229 prange->svms, prange, prange->start,
3230 prange->last);
3231 svm_range_unlink(prange);
3232 svm_range_remove_notifier(prange);
3233 svm_range_free(prange);
3234 }
3235
3236 mmap_write_downgrade(mm);
3237 /* Trigger migrations and revalidate and map to GPUs as needed. If
3238 * this fails we may be left with partially completed actions. There
3239 * is no clean way of rolling back to the previous state in such a
3240 * case because the rollback wouldn't be guaranteed to work either.
3241 */
3242 list_for_each_entry(prange, &update_list, update_list) {
3243 bool migrated;
3244
3245 mutex_lock(&prange->migrate_mutex);
3246
3247 r = svm_range_trigger_migration(mm, prange, &migrated);
3248 if (r)
3249 goto out_unlock_range;
3250
3251 if (migrated && !p->xnack_enabled) {
3252 pr_debug("restore_work will update mappings of GPUs\n");
3253 mutex_unlock(&prange->migrate_mutex);
3254 continue;
3255 }
3256
3257 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3258 true, true);
3259 if (r)
3260 pr_debug("failed %d to map svm range\n", r);
3261
3262 out_unlock_range:
3263 mutex_unlock(&prange->migrate_mutex);
3264 if (r)
3265 break;
3266 }
3267
3268 svm_range_debug_dump(svms);
3269
3270 mutex_unlock(&svms->lock);
3271 mmap_read_unlock(mm);
3272 out:
3273 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3274 &p->svms, start, start + size - 1, r);
3275
3276 return r;
3277 }
3278
3279 static int
3280 svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3281 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3282 {
3283 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3284 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3285 bool get_preferred_loc = false;
3286 bool get_prefetch_loc = false;
3287 bool get_granularity = false;
3288 bool get_accessible = false;
3289 bool get_flags = false;
3290 uint64_t last = start + size - 1UL;
3291 struct mm_struct *mm = current->mm;
3292 uint8_t granularity = 0xff;
3293 struct interval_tree_node *node;
3294 struct svm_range_list *svms;
3295 struct svm_range *prange;
3296 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3297 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3298 uint32_t flags_and = 0xffffffff;
3299 uint32_t flags_or = 0;
3300 int gpuidx;
3301 uint32_t i;
3302 int r = 0;
3303
3304 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3305 start + size - 1, nattr);
3306
3307 /* Flush pending deferred work to avoid racing with deferred actions from
3308 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3309 * can still race with get_attr because we don't hold the mmap lock. But that
3310 * would be a race condition in the application anyway, and undefined
3311 * behaviour is acceptable in that case.
3312 */
3313 flush_work(&p->svms.deferred_list_work);
3314
3315 mmap_read_lock(mm);
3316 r = svm_range_is_valid(p, start, size);
3317 mmap_read_unlock(mm);
3318 if (r) {
3319 pr_debug("invalid range r=%d\n", r);
3320 return r;
3321 }
3322
3323 for (i = 0; i < nattr; i++) {
3324 switch (attrs[i].type) {
3325 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3326 get_preferred_loc = true;
3327 break;
3328 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3329 get_prefetch_loc = true;
3330 break;
3331 case KFD_IOCTL_SVM_ATTR_ACCESS:
3332 get_accessible = true;
3333 break;
3334 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3335 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3336 get_flags = true;
3337 break;
3338 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3339 get_granularity = true;
3340 break;
3341 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3342 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3343 fallthrough;
3344 default:
3345 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3346 return -EINVAL;
3347 }
3348 }
3349
3350 svms = &p->svms;
3351
3352 mutex_lock(&svms->lock);
3353
3354 node = interval_tree_iter_first(&svms->objects, start, last);
3355 if (!node) {
3356 pr_debug("range attrs not found return default values\n");
3357 svm_range_set_default_attributes(&location, &prefetch_loc,
3358 &granularity, &flags_and);
3359 flags_or = flags_and;
3360 if (p->xnack_enabled)
3361 bitmap_copy(bitmap_access, svms->bitmap_supported,
3362 MAX_GPU_INSTANCE);
3363 else
3364 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3365 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3366 goto fill_values;
3367 }
3368 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3369 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3370
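/* Aggregate attributes over all overlapping ranges: locations collapse to
 * UNDEFINED on mismatch, access bitmaps are AND-ed, flags are AND-ed and
 * OR-ed for the set/clear queries, and the smallest granularity wins.
 */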
3371 while (node) {
3372 struct interval_tree_node *next;
3373
3374 prange = container_of(node, struct svm_range, it_node);
3375 next = interval_tree_iter_next(node, start, last);
3376
3377 if (get_preferred_loc) {
3378 if (prange->preferred_loc ==
3379 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3380 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3381 location != prange->preferred_loc)) {
3382 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3383 get_preferred_loc = false;
3384 } else {
3385 location = prange->preferred_loc;
3386 }
3387 }
3388 if (get_prefetch_loc) {
3389 if (prange->prefetch_loc ==
3390 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3391 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3392 prefetch_loc != prange->prefetch_loc)) {
3393 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3394 get_prefetch_loc = false;
3395 } else {
3396 prefetch_loc = prange->prefetch_loc;
3397 }
3398 }
3399 if (get_accessible) {
3400 bitmap_and(bitmap_access, bitmap_access,
3401 prange->bitmap_access, MAX_GPU_INSTANCE);
3402 bitmap_and(bitmap_aip, bitmap_aip,
3403 prange->bitmap_aip, MAX_GPU_INSTANCE);
3404 }
3405 if (get_flags) {
3406 flags_and &= prange->flags;
3407 flags_or |= prange->flags;
3408 }
3409
3410 if (get_granularity && prange->granularity < granularity)
3411 granularity = prange->granularity;
3412
3413 node = next;
3414 }
3415 fill_values:
3416 mutex_unlock(&svms->lock);
3417
3418 for (i = 0; i < nattr; i++) {
3419 switch (attrs[i].type) {
3420 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3421 attrs[i].value = location;
3422 break;
3423 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3424 attrs[i].value = prefetch_loc;
3425 break;
3426 case KFD_IOCTL_SVM_ATTR_ACCESS:
3427 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3428 attrs[i].value);
3429 if (gpuidx < 0) {
3430 pr_debug("invalid gpuid %x\n", attrs[i].value);
3431 return -EINVAL;
3432 }
3433 if (test_bit(gpuidx, bitmap_access))
3434 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3435 else if (test_bit(gpuidx, bitmap_aip))
3436 attrs[i].type =
3437 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3438 else
3439 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3440 break;
3441 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3442 attrs[i].value = flags_and;
3443 break;
3444 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3445 attrs[i].value = ~flags_or;
3446 break;
3447 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3448 attrs[i].value = (uint32_t)granularity;
3449 break;
3450 }
3451 }
3452
3453 return 0;
3454 }
3455
3456 int
3457 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3458 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3459 {
3460 int r;
3461
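/* start and size arrive in bytes from user space and are converted here to
 * the page units used throughout the SVM code.
 */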
3462 start >>= PAGE_SHIFT;
3463 size >>= PAGE_SHIFT;
3464
3465 switch (op) {
3466 case KFD_IOCTL_SVM_OP_SET_ATTR:
3467 r = svm_range_set_attr(p, start, size, nattrs, attrs);
3468 break;
3469 case KFD_IOCTL_SVM_OP_GET_ATTR:
3470 r = svm_range_get_attr(p, start, size, nattrs, attrs);
3471 break;
3472 default:
3473 r = -EINVAL;
3474 break;
3475 }
3476
3477 return r;
3478 }
3479