/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};
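
/*
 * Illustrative sketch (not part of this header): the priority levels above
 * double as array indices, e.g. into &drm_gpu_scheduler.sched_rq, which is
 * sized by DRM_SCHED_PRIORITY_COUNT further down in this file:
 *
 *	enum drm_sched_priority prio = DRM_SCHED_PRIORITY_NORMAL;
 *	struct drm_sched_rq *rq = &sched->sched_rq[prio];
 *
 * DRM_SCHED_PRIORITY_UNSET deliberately sits outside that range and must not
 * be used as an index.
 */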

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler).  Jobs from this entity can
	 * be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for more
	 * details.
	 *
	 * This will be set to NULL if @num_sched_list equals 1 and @rq has been
	 * set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler        **sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int                    num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by @rq_lock.
	 */
	enum drm_sched_priority         priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
	 * this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity.  The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is at the top of the job queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the guilty atomic provided by the driver at
	 * drm_sched_entity_init() time. It is set once a job from this entity
	 * exceeds the scheduler's hang limit, marking the context as guilty.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence                *last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup in
	 * drm_sched_entity_fini().
	 */
	struct completion		entity_idle;
};
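
/*
 * Minimal usage sketch (illustrative only, not part of this header): a driver
 * typically embeds a struct drm_sched_entity in its per-file context and
 * initializes it against one or more schedulers. The my_ctx/my_ctx_init names
 * below are hypothetical.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	int my_ctx_init(struct my_ctx *ctx, struct drm_gpu_scheduler *sched)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list, ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 *
 * The entity is torn down again with drm_sched_entity_destroy(), which
 * combines drm_sched_entity_flush() and drm_sched_entity_fini().
 */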

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: protects modification of the @entities list.
 * @sched: the scheduler to which this rq belongs to.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};
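
/*
 * Illustrative sketch (not part of this header): as documented for @finished
 * above, the finished fence is the one a driver should hand out as the job's
 * out fence. sched_job is assumed to be a fully initialized and armed
 * &struct drm_sched_job.
 *
 *	struct dma_fence *out_fence;
 *
 *	out_fence = dma_fence_get(&sched_job->s_fence->finished);
 *	drm_sched_entity_push_job(sched_job);
 *	// hand out_fence to userspace (sync_file, drm_syncobj, ...), then
 *	// drop the local reference with dma_fence_put()
 *
 * The reference must be taken before drm_sched_entity_push_job(), since the
 * job may complete and be freed as soon as it has been pushed.
 */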

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: used to add the job to the scheduler's "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); once the driver
 * wants the scheduler to schedule the job, it calls
 * drm_sched_entity_push_job() (see the sketch after this struct).
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity         *entity;
	struct dma_fence_cb		cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;
};
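
/*
 * Minimal submission sketch (illustrative only, not part of this header),
 * following the lifecycle described above: init the job against an entity,
 * declare its dependencies, arm it, then push it. The my_submit/my_job_alloc
 * names are hypothetical driver-side helpers.
 *
 *	int my_submit(struct drm_sched_entity *entity, struct drm_gem_object *bo,
 *		      bool write, void *owner)
 *	{
 *		struct drm_sched_job *job = my_job_alloc();
 *		int ret;
 *
 *		ret = drm_sched_job_init(job, entity, owner);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_sched_job_add_implicit_dependencies(job, bo, write);
 *		if (ret) {
 *			drm_sched_job_cleanup(job);
 *			return ret;
 *		}
 *
 *		drm_sched_job_arm(job);
 *		drm_sched_entity_push_job(job);
 *		return 0;
 *	}
 *
 * drm_sched_job_cleanup() undoes drm_sched_job_init() on error paths; after
 * drm_sched_entity_push_job() the scheduler owns the job and will call
 * &drm_sched_backend_ops.free_job once it has finished.
 */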

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

/**
 * enum drm_gpu_sched_stat - status returned by the timeout handler
 *
 * @DRM_GPU_SCHED_STAT_NONE: reserved, do not use.
 * @DRM_GPU_SCHED_STAT_NOMINAL: recovery has started or completed normally.
 * @DRM_GPU_SCHED_STAT_ENODEV: the device is no longer available.
 */
enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on.  Once it
	 * returns NULL, run_job() may be called.
	 *
	 * If a driver exclusively uses drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies() this can be omitted and
	 * left as NULL.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_resubmit_jobs()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows this workflow (see also the sketch after
	 * this struct):
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
	 * underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
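
/*
 * Illustrative sketch (not part of this header) of a driver-side ops table
 * and a timeout handler following the single-queue workflow documented for
 * @timedout_job above. All my_* names are hypothetical.
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);	// 1. park scheduler, cancel timeout work
 *		drm_sched_increase_karma(bad);	//    optionally mark the job guilty
 *		my_hw_reset(sched);		// 3. driver-specific GPU reset
 *		drm_sched_resubmit_jobs(sched);	// 4. re-queue pending jobs
 *		drm_sched_start(sched, true);	// 5. unpark and restart the timeout
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */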

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help the load balancer pick an idle sched.
 * @_score: score used when the driver doesn't provide one.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t                        *score;
	atomic_t                        _score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name);
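
/*
 * Illustrative sketch (not part of this header): one scheduler is typically
 * initialized per hardware ring, using the driver's ops table. The values and
 * my_*/ring names below are hypothetical.
 *
 *	int ret;
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission: HW queue depth
 *			     3,				// hang_limit: hangs before a job is guilty
 *			     msecs_to_jiffies(1000),	// timeout for a job on the HW
 *			     NULL,			// timeout_wq: NULL selects the system_wq
 *			     NULL,			// score: NULL uses the scheduler's own _score
 *			     "my-ring");
 *	if (ret)
 *		return ret;
 *
 * The matching teardown is drm_sched_fini().
 */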

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif