/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device.c
     823  kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);  in kgd2kfd_probe()
     937  kfd->pdev->vendor, kfd->pdev->device,  in kgd2kfd_device_init()
     977  kfd->kgd, size, &kfd->gtt_mem,  in kgd2kfd_device_init()
    1007  kfd->dqm = device_queue_manager_init(kfd);  in kgd2kfd_device_init()
    1077  kfd->pdev->vendor, kfd->pdev->device);  in kgd2kfd_device_init()
    1106  kfd->dqm->ops.pre_reset(kfd->dqm);  in kgd2kfd_pre_reset()
    1156  kfd->dqm->ops.stop(kfd->dqm);  in kgd2kfd_suspend()
    1190  kfd->pdev->vendor, kfd->pdev->device);  in kgd2kfd_resume_iommu()
    1198  err = kfd->dqm->ops.start(kfd->dqm);  in kfd_resume()
    1202  kfd->pdev->vendor, kfd->pdev->device);  in kfd_resume()
    [all …]
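Read together, these hits trace the device lifecycle: kgd2kfd_probe() only allocates the zeroed struct, kgd2kfd_device_init() carves out GTT memory and brings up the device queue manager, and the reset/suspend/resume entry points all funnel through kfd->dqm->ops. A compilable sketch of that shape, with stand-in types that model only the members visible above; everything else here is an assumption, not the driver's real layout:

#include <linux/slab.h>

/* stand-in types; the real structs are far larger */
struct device_queue_manager;

struct dqm_ops {
	int  (*start)(struct device_queue_manager *dqm);
	int  (*stop)(struct device_queue_manager *dqm);
	void (*pre_reset)(struct device_queue_manager *dqm);
};

struct device_queue_manager {
	struct dqm_ops ops;
};

struct kfd_dev_sketch {
	struct device_queue_manager *dqm;
};

/* kgd2kfd_probe(): bare zeroed allocation, no hardware touched (line 823) */
static struct kfd_dev_sketch *probe_sketch(void)
{
	return kzalloc(sizeof(struct kfd_dev_sketch), GFP_KERNEL);
}

/* reset/suspend/resume all funnel through the dqm ops table
 * (lines 1106, 1156, 1198) */
static void pre_reset_sketch(struct kfd_dev_sketch *kfd)
{
	kfd->dqm->ops.pre_reset(kfd->dqm);
}

static void suspend_sketch(struct kfd_dev_sketch *kfd)
{
	kfd->dqm->ops.stop(kfd->dqm);
}

static int resume_sketch(struct kfd_dev_sketch *kfd)
{
	return kfd->dqm->ops.start(kfd->dqm);
}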
kfd_doorbell.c
      84  if (!kfd->max_doorbell_slices ||  in kfd_doorbell_init()
      88  kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +  in kfd_doorbell_init()
      93  kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,  in kfd_doorbell_init()
      96  if (!kfd->doorbell_kernel_ptr)  in kfd_doorbell_init()
     101  (uintptr_t)kfd->doorbell_base);  in kfd_doorbell_init()
     104  kfd->doorbell_base_dw_offset);  in kfd_doorbell_init()
     110  (uintptr_t)kfd->doorbell_base);  in kfd_doorbell_init()
     122  if (kfd->doorbell_kernel_ptr)  in kfd_doorbell_fini()
     173  mutex_lock(&kfd->doorbell_mutex);  in kfd_get_kernel_doorbell()
     206  mutex_lock(&kfd->doorbell_mutex);  in kfd_release_kernel_doorbell()
    [all …]
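The doorbell hits follow one pattern: kfd_doorbell_init() derives the doorbell aperture's physical address from the shared resources the amdgpu side handed over, ioremap()s the kernel's slice of it, and every later grant or release of a kernel doorbell is serialized on kfd->doorbell_mutex. A minimal sketch of the init/fini pair, assuming a simplified surrounding struct (the per-device offset arithmetic at line 88 is elided):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mutex.h>

/* simplified stand-in for the doorbell-related part of struct kfd_dev */
struct doorbell_sketch {
	phys_addr_t   doorbell_base;		/* physical address in the BAR */
	u32 __iomem  *doorbell_kernel_ptr;	/* kernel mapping of our slice */
	struct mutex  doorbell_mutex;		/* guards doorbell grant/release */
};

static int doorbell_init_sketch(struct doorbell_sketch *db,
				phys_addr_t doorbell_physical_address,
				size_t slice_bytes)
{
	mutex_init(&db->doorbell_mutex);

	/* line 88: base = BAR physical address handed over by amdgpu */
	db->doorbell_base = doorbell_physical_address;

	/* line 93: map only the kernel's doorbell slice, not the whole BAR */
	db->doorbell_kernel_ptr = ioremap(db->doorbell_base, slice_bytes);
	if (!db->doorbell_kernel_ptr)	/* line 96 */
		return -ENOMEM;

	return 0;
}

static void doorbell_fini_sketch(struct doorbell_sketch *db)
{
	if (db->doorbell_kernel_ptr)	/* line 122 */
		iounmap(db->doorbell_kernel_ptr);
}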
kfd_interrupt.c
      52  int kfd_interrupt_init(struct kfd_dev *kfd)  in kfd_interrupt_init() (argument)
      56  r = kfifo_alloc(&kfd->ih_fifo,  in kfd_interrupt_init()
      65  if (unlikely(!kfd->ih_wq)) {  in kfd_interrupt_init()
      66  kfifo_free(&kfd->ih_fifo);  in kfd_interrupt_init()
      70  spin_lock_init(&kfd->interrupt_lock);  in kfd_interrupt_init()
      74  kfd->interrupts_active = true;  in kfd_interrupt_init()
      96  kfd->interrupts_active = false;  in kfd_interrupt_exit()
     104  flush_workqueue(kfd->ih_wq);  in kfd_interrupt_exit()
     106  kfifo_free(&kfd->ih_fifo);  in kfd_interrupt_exit()
     117  kfd->device_info->ih_ring_entry_size);  in enqueue_ih_ring_entry()
    [all …]
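These hits show a classic top-half/bottom-half split: interrupt handlers copy raw ring entries into kfd->ih_fifo and a dedicated workqueue drains them outside interrupt context, with kfd->interrupts_active gating new entries during teardown. A sketch of the init/exit pair; the workqueue name and fifo size are illustrative assumptions:

#include <linux/errno.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* stand-in for the interrupt-handling slice of struct kfd_dev */
struct ih_sketch {
	struct kfifo		 ih_fifo;	/* ISR -> worker handoff */
	struct workqueue_struct	*ih_wq;		/* bottom-half context */
	spinlock_t		 interrupt_lock;
	bool			 interrupts_active;
};

static int interrupt_init_sketch(struct ih_sketch *ih, unsigned int fifo_bytes)
{
	int r;

	r = kfifo_alloc(&ih->ih_fifo, fifo_bytes, GFP_KERNEL);	/* line 56 */
	if (r)
		return r;

	ih->ih_wq = alloc_workqueue("kfd_ih_sketch", WQ_HIGHPRI, 1);
	if (unlikely(!ih->ih_wq)) {				/* line 65 */
		kfifo_free(&ih->ih_fifo);			/* line 66 */
		return -ENOMEM;
	}

	spin_lock_init(&ih->interrupt_lock);			/* line 70 */
	ih->interrupts_active = true;				/* line 74 */
	return 0;
}

static void interrupt_exit_sketch(struct ih_sketch *ih)
{
	ih->interrupts_active = false;	/* stop accepting entries, line 96 */
	flush_workqueue(ih->ih_wq);	/* drain in-flight work, line 104 */
	destroy_workqueue(ih->ih_wq);
	kfifo_free(&ih->ih_fifo);	/* line 106 */
}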
kfd_iommu.c
      48  if (!kfd->use_iommu_v2)  in kfd_iommu_check_device()
      70  if (!kfd->use_iommu_v2)  in kfd_iommu_device_init()
     289  void kfd_iommu_suspend(struct kfd_dev *kfd)  in kfd_iommu_suspend() (argument)
     291  if (!kfd->use_iommu_v2)  in kfd_iommu_suspend()
     294  kfd_unbind_processes_from_device(kfd);  in kfd_iommu_suspend()
     298  amd_iommu_free_device(kfd->pdev);  in kfd_iommu_suspend()
     306  int kfd_iommu_resume(struct kfd_dev *kfd)  in kfd_iommu_resume() (argument)
     311  if (!kfd->use_iommu_v2)  in kfd_iommu_resume()
     322  amd_iommu_set_invalid_ppr_cb(kfd->pdev,  in kfd_iommu_resume()
     325  err = kfd_bind_processes_to_device(kfd);  in kfd_iommu_resume()
    [all …]
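Every IOMMUv2 entry point opens with the same guard: if the device never enabled IOMMUv2, the call degrades to a no-op (or trivial success), so the rest of the driver can call these unconditionally. A sketch of that guard shape, with the process bind/unbind work reduced to comments:

#include <linux/types.h>

/* stand-in for the field these guards consult */
struct iommu_sketch {
	bool use_iommu_v2;	/* set once at device init */
};

static void iommu_suspend_sketch(struct iommu_sketch *kfd)
{
	if (!kfd->use_iommu_v2)		/* line 291: no-op without IOMMUv2 */
		return;
	/* unbind processes, then release the IOMMUv2 device
	 * (lines 294 and 298) */
}

static int iommu_resume_sketch(struct iommu_sketch *kfd)
{
	if (!kfd->use_iommu_v2)		/* line 311: trivially succeeds */
		return 0;
	/* re-register the PPR callbacks and rebind processes
	 * (lines 322 and 325) */
	return 0;
}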
kfd_iommu.h
      32  int kfd_iommu_check_device(struct kfd_dev *kfd);
      33  int kfd_iommu_device_init(struct kfd_dev *kfd);
      38  void kfd_iommu_suspend(struct kfd_dev *kfd);
      39  int kfd_iommu_resume(struct kfd_dev *kfd);
      45  static inline int kfd_iommu_check_device(struct kfd_dev *kfd)  in kfd_iommu_check_device() (argument)
      49  static inline int kfd_iommu_device_init(struct kfd_dev *kfd)  in kfd_iommu_device_init() (argument)
      67  static inline void kfd_iommu_suspend(struct kfd_dev *kfd)  in kfd_iommu_suspend() (argument)
      71  static inline int kfd_iommu_resume(struct kfd_dev *kfd)  in kfd_iommu_resume() (argument)
kfd_priv.h
     943  size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
     944  int kfd_doorbell_init(struct kfd_dev *kfd);
     945  void kfd_doorbell_fini(struct kfd_dev *kfd);
     948  void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
     954  unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
     958  int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
     960  void kfd_free_process_doorbells(struct kfd_dev *kfd,
     964  int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
    1138  uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
    1188  static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)  in kfd_devcgroup_check_permission() (argument)
    [all …]
kfd_mqd_manager_v9.c
      85  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() (argument)
     107  if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {  in allocate_mqd()
     111  retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,  in allocate_mqd()
     118  retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),  in allocate_mqd()
     301  struct kfd_dev *kfd = mm->dev;  in free_mqd() (local)
     304  amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);  in free_mqd()
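Unlike the CIK/VI/v10 variants below, which always use the GTT sub-allocator, the v9 allocate_mqd() branches: when CWSR (compute-wave save/restore) is enabled, a compute queue's MQD comes from a dedicated amdgpu GTT allocation instead. A sketch of just that decision, with the actual allocation calls reduced to comments:

#include <linux/types.h>

enum mqd_alloc_path {
	MQD_FROM_GTT,	/* dedicated amdgpu_amdkfd_alloc_gtt_mem(), line 111 */
	MQD_FROM_SA,	/* common kfd_gtt_sa_allocate() sub-allocator, line 118 */
};

/* the branch at line 107, lifted out for clarity */
static enum mqd_alloc_path pick_mqd_path_sketch(bool cwsr_enabled,
						bool is_compute_queue)
{
	if (cwsr_enabled && is_compute_queue)
		return MQD_FROM_GTT;
	return MQD_FROM_SA;
}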
kfd_mqd_manager.h
      70  struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd,
kfd_packet_manager_vi.c
      79  struct kfd_dev *kfd = pm->dqm->dev;  in pm_runlist_vi() (local)
      94  kfd->max_proc_per_quantum);  in pm_runlist_vi()
kfd_packet_manager_v9.c
     121  struct kfd_dev *kfd = pm->dqm->dev;  in pm_runlist_v9() (local)
     133  kfd->max_proc_per_quantum);  in pm_runlist_v9()
kfd_mqd_manager_cik.c
      76  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() (argument)
      81  if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd),  in allocate_mqd()
kfd_mqd_manager_v10.c
      76  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() (argument)
      81  if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),  in allocate_mqd()
kfd_chardev.c
    1010  struct kfd_dev *kfd;  in kfd_ioctl_create_event() (local)
    1015  kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));  in kfd_ioctl_create_event()
    1016  if (!kfd) {  in kfd_ioctl_create_event()
    1029  pdd = kfd_bind_process_to_device(kfd, p);  in kfd_ioctl_create_event()
    1044  err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,  in kfd_ioctl_create_event()
    1054  amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem);  in kfd_ioctl_create_event()
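The ioctl recovers which GPU owns the event page from the mmap offset itself: GET_GPU_ID() at line 1015 extracts an id packed into the offset's high bits, and kfd_device_by_id() maps it back to a device. A sketch of that packing idea; the shift width below is an illustrative assumption, not the driver's actual KFD_MMAP_* constant:

#include <linux/types.h>

/* illustrative only; KFD's real mmap-offset layout may differ */
#define SKETCH_GPU_ID_SHIFT	46

static inline u32 sketch_get_gpu_id(u64 event_page_offset)
{
	return (u32)(event_page_offset >> SKETCH_GPU_ID_SHIFT);
}

static inline u64 sketch_make_offset(u32 gpu_id)
{
	return (u64)gpu_id << SKETCH_GPU_ID_SHIFT;
}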
kfd_mqd_manager_vi.c
      79  static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,  in allocate_mqd() (argument)
      84  if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd),  in allocate_mqd()
kfd_migrate.c
     216  return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;  in svm_migrate_addr_to_pfn()
     247  return (addr - adev->kfd.dev->pgmap.range.start);  in svm_migrate_addr()
     934  struct kfd_dev *kfddev = adev->kfd.dev;  in svm_migrate_init()
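The two helpers at lines 216 and 247 are inverses around the device-memory pgmap base: one rebases a device-relative address into the pgmap range and shifts it down to a page frame number, the other strips the base back off. Written out with the base as an explicit parameter:

#include <linux/mm.h>

/* line 216: device-relative address -> system page frame number */
static unsigned long addr_to_pfn_sketch(u64 addr, u64 pgmap_start)
{
	return (addr + pgmap_start) >> PAGE_SHIFT;
}

/* line 247: pgmap-absolute address -> device-relative address */
static u64 to_device_addr_sketch(u64 addr, u64 pgmap_start)
{
	return addr - pgmap_start;
}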
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd.h
     328  bool kgd2kfd_device_init(struct kfd_dev *kfd,
     331  void kgd2kfd_device_exit(struct kfd_dev *kfd);
     332  void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
     333  int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
     334  int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
     335  int kgd2kfd_pre_reset(struct kfd_dev *kfd);
     336  int kgd2kfd_post_reset(struct kfd_dev *kfd);
     338  void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
     381  static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)  in kgd2kfd_pre_reset() (argument)
     386  static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)  in kgd2kfd_post_reset() (argument)
    [all …]
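The header pairs each real prototype (lines 328–338) with a static-inline stub (lines 381, 386) so amdgpu call sites never need an #ifdef. A sketch of that pattern; the config symbol is an assumption:

/* real declaration when KFD is built, no-op stub otherwise */
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
#else
static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;	/* succeed silently when KFD is compiled out */
}
#endif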
amdgpu_amdkfd.c
      77  if (adev->kfd.dev)  in amdgpu_amdkfd_device_probe()
     119  if (adev->kfd.dev) {  in amdgpu_amdkfd_device_init()
     170  adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,  in amdgpu_amdkfd_device_init()
     177  if (adev->kfd.dev) {  in amdgpu_amdkfd_device_fini_sw()
     186  if (adev->kfd.dev)  in amdgpu_amdkfd_interrupt()
     192  if (adev->kfd.dev)  in amdgpu_amdkfd_suspend()
     200  if (adev->kfd.dev)  in amdgpu_amdkfd_resume_iommu()
     210  if (adev->kfd.dev)  in amdgpu_amdkfd_resume()
     220  if (adev->kfd.dev)  in amdgpu_amdkfd_pre_reset()
     230  if (adev->kfd.dev)  in amdgpu_amdkfd_post_reset()
    [all …]
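Every wrapper here applies the same runtime guard: adev->kfd.dev is only non-NULL once KFD probed successfully, so amdgpu's init, interrupt, reset, and power paths forward into KFD only when it is actually present. A compilable sketch of one such wrapper, with stand-in types:

#include <linux/types.h>

struct kfd_dev_stub;

/* stand-in for the real kgd2kfd_suspend() */
static void kgd2kfd_suspend_stub(struct kfd_dev_stub *kfd, bool run_pm)
{
}

struct adev_sketch {
	struct kfd_dev_stub *kfd_dev;	/* NULL until KFD probes successfully */
};

/* the guard repeated at lines 77-230: forward only if KFD exists */
static void amdkfd_suspend_sketch(struct adev_sketch *adev, bool run_pm)
{
	if (adev->kfd_dev)
		kgd2kfd_suspend_stub(adev->kfd_dev, run_pm);
}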
amdgpu_sdma.c
     161  kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);  in amdgpu_sdma_process_ras_data_cb()
amdgpu_umc.c
      98  kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);  in amdgpu_umc_process_ras_data_cb()
amdgpu_amdkfd_gpuvm.c
     166  (adev->kfd.vram_used + vram_needed >  in amdgpu_amdkfd_reserve_mem_limit()
     172  adev->kfd.vram_used += vram_needed;  in amdgpu_amdkfd_reserve_mem_limit()
     197  adev->kfd.vram_used -= size;  in unreserve_mem_limit()
     198  WARN_ONCE(adev->kfd.vram_used < 0,  in unreserve_mem_limit()
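adev->kfd.vram_used is a plain reserve/release counter: the reserve path refuses an allocation that would exceed the budget before committing it, and the release path warns once if the books ever go negative. A reduced sketch; the real code serializes these updates, and that locking is elided here:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>

static s64 vram_used_sketch;	/* models adev->kfd.vram_used */

static int reserve_vram_sketch(s64 vram_needed, s64 vram_limit)
{
	if (vram_used_sketch + vram_needed > vram_limit)	/* line 166 */
		return -ENOMEM;
	vram_used_sketch += vram_needed;			/* line 172 */
	return 0;
}

static void unreserve_vram_sketch(s64 size)
{
	vram_used_sketch -= size;				/* line 197 */
	WARN_ONCE(vram_used_sketch < 0,				/* line 198 */
		  "KFD VRAM accounting went negative");
}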
/linux/samples/bpf/
task_fd_query_user.c
     235  int err = -1, res, kfd, efd;  in test_debug_fs_uprobe() (local)
     241  kfd = open(buf, O_WRONLY | O_TRUNC, 0);  in test_debug_fs_uprobe()
     242  CHECK_PERROR_RET(kfd < 0);  in test_debug_fs_uprobe()
     251  CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);  in test_debug_fs_uprobe()
     253  close(kfd);  in test_debug_fs_uprobe()
     254  kfd = -1;  in test_debug_fs_uprobe()
     271  kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  in test_debug_fs_uprobe()
     272  link = bpf_program__attach_perf_event(progs[0], kfd);  in test_debug_fs_uprobe()
     276  close(kfd);  in test_debug_fs_uprobe()
     281  err = bpf_task_fd_query(getpid(), kfd, 0, buf, &len,  in test_debug_fs_uprobe()
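In this BPF sample, kfd is an ordinary file descriptor, unrelated to the amdkfd driver above: it first holds the debugfs uprobe_events file that the probe definition is written into (lines 241–253), then is reused for the perf event fd the BPF program attaches to (line 271). A userspace sketch of the first half; the event name and tracefs path are illustrative assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* register a uprobe by writing a "p:" line into uprobe_events;
 * returns 0 on success, -1 on error */
static int create_uprobe_sketch(const char *binary_path, long offset)
{
	char buf[256];
	int kfd;

	kfd = open("/sys/kernel/debug/tracing/uprobe_events",
		   O_WRONLY | O_TRUNC, 0);			/* line 241 */
	if (kfd < 0)
		return -1;

	snprintf(buf, sizeof(buf), "p:sketch_event %s:0x%lx",
		 binary_path, offset);
	if (write(kfd, buf, strlen(buf)) < 0) {			/* line 251 */
		close(kfd);
		return -1;
	}

	close(kfd);						/* line 253 */
	return 0;
}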
/linux/tools/perf/
builtin-probe.c
     426  int ret, ret2, ufd = -1, kfd = -1;  in perf_del_probe_events() (local)
     440  ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);  in perf_del_probe_events()
     451  ret = probe_file__get_events(kfd, filter, klist);  in perf_del_probe_events()
     456  ret = probe_file__del_strlist(kfd, klist);  in perf_del_probe_events()
     479  if (kfd >= 0)  in perf_del_probe_events()
     480  close(kfd);  in perf_del_probe_events()
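In perf, kfd and ufd name the kprobe_events and uprobe_events tracefs descriptors. Both start at -1 (line 426) so a single exit path can close exactly the ones that were opened, whatever failed in between. The cleanup idiom:

#include <unistd.h>

/* close whichever of the two probe-file fds was actually opened */
static void close_probe_fds_sketch(int kfd, int ufd)
{
	if (kfd >= 0)		/* line 479 */
		close(kfd);	/* line 480 */
	if (ufd >= 0)
		close(ufd);
}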
/linux/tools/perf/util/
probe-file.c
     152  int probe_file__open_both(int *kfd, int *ufd, int flag)  in probe_file__open_both() (argument)
     154  if (!kfd || !ufd)  in probe_file__open_both()
     157  *kfd = open_kprobe_events(flag & PF_FL_RW);  in probe_file__open_both()
     159  if (*kfd < 0 && *ufd < 0) {  in probe_file__open_both()
     160  print_both_open_warning(*kfd, *ufd, flag & PF_FL_RW);  in probe_file__open_both()
     161  return *kfd;  in probe_file__open_both()
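The contract visible here: both output pointers are mandatory, each events file is opened independently, and the call only fails when both opens fail (a kernel without uprobe support can still edit kprobes, and vice versa). A userspace sketch of that shape; the hard-coded tracefs paths and the flag constant stand in for perf's own path resolution and PF_FL_RW:

#include <errno.h>
#include <fcntl.h>

#define SKETCH_FL_RW	1	/* models PF_FL_RW */

static int open_events_sketch(const char *path, int flag)
{
	return open(path, (flag & SKETCH_FL_RW) ? O_RDWR | O_APPEND : O_RDONLY);
}

static int open_both_sketch(int *kfd, int *ufd, int flag)
{
	if (!kfd || !ufd)					/* line 154 */
		return -EINVAL;

	*kfd = open_events_sketch(
		"/sys/kernel/debug/tracing/kprobe_events", flag); /* line 157 */
	*ufd = open_events_sketch(
		"/sys/kernel/debug/tracing/uprobe_events", flag);

	/* fail only if neither side opened (lines 159-161) */
	if (*kfd < 0 && *ufd < 0)
		return *kfd;
	return 0;
}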
probe-file.h
      42  int probe_file__open_both(int *kfd, int *ufd, int flag);
probe-event.c
    3690  int ret, ret2, ufd = -1, kfd = -1;  in del_perf_probe_events() (local)
    3697  ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);  in del_perf_probe_events()
    3701  ret = probe_file__del_events(kfd, filter);  in del_perf_probe_events()
    3713  if (kfd >= 0)  in del_perf_probe_events()
    3714  close(kfd);  in del_perf_probe_events()