/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

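/*
 * amdgpu_job_timedout - DRM scheduler timeout callback
 *
 * Called when a job has been running past its timeout. Tries a per-ring
 * soft recovery first and falls back to a full GPU reset when recovery is
 * allowed; if the device has been unplugged the job is simply aborted.
 */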
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		amdgpu_device_gpu_recover(ring->adev, job);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

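/*
 * amdgpu_job_alloc - allocate a job with room for @num_ibs IBs
 *
 * The IB array is placed directly behind the job structure in a single
 * allocation; the sync objects and the VRAM lost counter are set up here.
 */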
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

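/*
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB of @size bytes
 * taken from the given IB pool
 */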
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

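/*
 * amdgpu_job_free_resources - release the job's IBs
 *
 * The IB memory is kept alive until the scheduler's finished fence (or,
 * lacking one, the HW fence) signals.
 */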
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	struct dma_fence *hw_fence;
	unsigned i;

	if (job->hw_fence.ops == NULL)
		hw_fence = job->external_hw_fence;
	else
		hw_fence = &job->hw_fence;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

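/*
 * amdgpu_job_free_cb - DRM scheduler free_job callback
 *
 * Drops the sync objects and releases the job; when the job embeds its HW
 * fence, the allocation is freed through that fence's last reference.
 */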
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

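/*
 * amdgpu_job_free - free a job directly, without going through the
 * scheduler's free_job callback (direct submission and error paths)
 */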
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

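/*
 * amdgpu_job_submit - push a job to the scheduler entity
 *
 * Returns the scheduler's finished fence in @f; the reference taken here
 * must be dropped by the caller.
 */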
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

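/*
 * amdgpu_job_submit_direct - bypass the scheduler and write the job's IBs
 * directly to the ring, returning the HW fence in @fence
 */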
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	/* record external_hw_fence for direct submit */
	job->external_hw_fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	dma_fence_put(*fence);

	return 0;
}

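/*
 * amdgpu_job_dependency - DRM scheduler dependency callback
 *
 * Returns the next fence the job still has to wait for before it can run,
 * and grabs a VM ID once all explicit dependencies have been resolved.
 */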
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

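/*
 * amdgpu_job_run - DRM scheduler run_job callback
 *
 * Schedules the job's IBs on the ring and returns the resulting HW fence;
 * the IBs are skipped (finished fence set to -ECANCELED) if VRAM was lost.
 */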
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* skip IB as well if VRAM lost */
	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	if (!job->job_run_counter)
		dma_fence_get(fence);
	else if (finished->error < 0)
		dma_fence_put(&job->hw_fence);
	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

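/*
 * amdgpu_job_stop_all_jobs_on_sched - fail all jobs on a scheduler
 *
 * Signals every job still queued on the scheduler's run queues as well as
 * every job already pushed to the HW, setting -EHWPOISON as their error so
 * that anything waiting on them is unblocked.
 */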
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};