Lines matching refs:sched in drivers/gpu/drm/scheduler/sched_main.c
73 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
79 rq->sched = sched; in drm_sched_rq_init()
96 atomic_inc(rq->sched->score); in drm_sched_rq_add_entity()
115 atomic_dec(rq->sched->score); in drm_sched_rq_remove_entity()
175 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
177 atomic_dec(&sched->hw_rq_count); in drm_sched_job_done()
178 atomic_dec(sched->score); in drm_sched_job_done()
185 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_job_done()
211 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
219 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
233 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) in drm_sched_start_timeout() argument
235 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_start_timeout()
236 !list_empty(&sched->pending_list)) in drm_sched_start_timeout()
237 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
247 void drm_sched_fault(struct drm_gpu_scheduler *sched) in drm_sched_fault() argument
249 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); in drm_sched_fault()
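
drm_sched_fault() gives a driver a way to kick the timeout handler immediately, typically from a hardware fault or MMU interrupt, instead of waiting for sched->timeout to expire. A minimal sketch of such a caller follows; the foo_* names and the IRQ wiring are assumptions, not taken from this listing (assumes <linux/interrupt.h> and <drm/gpu_scheduler.h>).

	/* Hypothetical fault IRQ: the hardware reported a problem, so run the
	 * TDR work now rather than after the normal timeout period. */
	static irqreturn_t foo_fault_irq(int irq, void *data)
	{
		struct foo_device *foo = data;	/* hypothetical driver struct */

		drm_sched_fault(&foo->sched);
		return IRQ_HANDLED;
	}
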
265 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) in drm_sched_suspend_timeout() argument
269 sched_timeout = sched->work_tdr.timer.expires; in drm_sched_suspend_timeout()
275 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) in drm_sched_suspend_timeout()
279 return sched->timeout; in drm_sched_suspend_timeout()
291 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, in drm_sched_resume_timeout() argument
294 spin_lock(&sched->job_list_lock); in drm_sched_resume_timeout()
296 if (list_empty(&sched->pending_list)) in drm_sched_resume_timeout()
297 cancel_delayed_work(&sched->work_tdr); in drm_sched_resume_timeout()
299 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining); in drm_sched_resume_timeout()
301 spin_unlock(&sched->job_list_lock); in drm_sched_resume_timeout()
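
drm_sched_suspend_timeout() returns how much of the current TDR budget is left and parks the timer at MAX_SCHEDULE_TIMEOUT; drm_sched_resume_timeout() re-arms it with that remainder. A hedged sketch of the intended pairing; what happens in between is an assumption about some driver-specific operation during which the job must not be declared hung.

	unsigned long remaining;

	remaining = drm_sched_suspend_timeout(&foo->sched);	/* park the TDR */
	/* ... driver-specific work, e.g. a preemption request or state dump,
	 * that must not race with a timeout-triggered reset ... */
	drm_sched_resume_timeout(&foo->sched, remaining);	/* restore remaining budget */
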
307 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
309 spin_lock(&sched->job_list_lock); in drm_sched_job_begin()
310 list_add_tail(&s_job->list, &sched->pending_list); in drm_sched_job_begin()
311 drm_sched_start_timeout(sched); in drm_sched_job_begin()
312 spin_unlock(&sched->job_list_lock); in drm_sched_job_begin()
317 struct drm_gpu_scheduler *sched; in drm_sched_job_timedout() local
321 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); in drm_sched_job_timedout()
324 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
325 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_job_timedout()
335 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
337 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
343 if (sched->free_guilty) { in drm_sched_job_timedout()
344 job->sched->ops->free_job(job); in drm_sched_job_timedout()
345 sched->free_guilty = false; in drm_sched_job_timedout()
348 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
352 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
353 drm_sched_start_timeout(sched); in drm_sched_job_timedout()
354 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
391 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) in drm_sched_stop() argument
395 kthread_park(sched->thread); in drm_sched_stop()
404 if (bad && bad->sched == sched) in drm_sched_stop()
409 list_add(&bad->list, &sched->pending_list); in drm_sched_stop()
417 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, in drm_sched_stop()
422 atomic_dec(&sched->hw_rq_count); in drm_sched_stop()
428 spin_lock(&sched->job_list_lock); in drm_sched_stop()
430 spin_unlock(&sched->job_list_lock); in drm_sched_stop()
446 sched->ops->free_job(s_job); in drm_sched_stop()
448 sched->free_guilty = true; in drm_sched_stop()
458 cancel_delayed_work(&sched->work_tdr); in drm_sched_stop()
470 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) in drm_sched_start() argument
480 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_start()
483 atomic_inc(&sched->hw_rq_count); in drm_sched_start()
501 spin_lock(&sched->job_list_lock); in drm_sched_start()
502 drm_sched_start_timeout(sched); in drm_sched_start()
503 spin_unlock(&sched->job_list_lock); in drm_sched_start()
506 kthread_unpark(sched->thread); in drm_sched_start()
516 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) in drm_sched_resubmit_jobs() argument
518 drm_sched_resubmit_jobs_ext(sched, INT_MAX); in drm_sched_resubmit_jobs()
529 void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max) in drm_sched_resubmit_jobs_ext() argument
537 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_resubmit_jobs_ext()
543 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { in drm_sched_resubmit_jobs_ext()
552 fence = sched->ops->run_job(s_job); in drm_sched_resubmit_jobs_ext()
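
The stop/increase_karma/resubmit/start calls above are normally strung together by a driver's timedout_job callback around its own hardware reset. A hedged sketch of that conventional recovery sequence follows; foo_timedout_job and the reset step are assumptions, and the return-value policy differs between drivers.

	static enum drm_gpu_sched_stat
	foo_timedout_job(struct drm_sched_job *sched_job)
	{
		struct drm_gpu_scheduler *sched = sched_job->sched;

		/* Park the scheduler thread and detach the pending jobs
		 * from their hardware fences. */
		drm_sched_stop(sched, sched_job);

		/* Raise the offending job's karma so it can cross hang_limit. */
		drm_sched_increase_karma(sched_job);

		/* ... driver-specific engine/ring reset would go here ... */

		/* Re-run the surviving pending jobs and unpark the thread. */
		drm_sched_resubmit_jobs(sched);
		drm_sched_start(sched, true);

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}
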
622 struct drm_gpu_scheduler *sched; in drm_sched_job_arm() local
627 sched = entity->rq->sched; in drm_sched_job_arm()
629 job->sched = sched; in drm_sched_job_arm()
630 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_arm()
631 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_arm()
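
drm_sched_job_arm() is the second half of the two-step submission API: the job is first initialized against an entity, then armed (which fixes job->sched, the priority and the job id, as shown above) and pushed. A hedged sketch of a submit path; the foo job wrapper and the NULL owner are assumptions, and exact signatures vary by kernel version.

	/* Hypothetical submit path for a driver job embedding struct drm_sched_job. */
	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret)
		return ret;

	/* ... collect dependencies / implicit fences for the job here ... */

	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);
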
763 static bool drm_sched_ready(struct drm_gpu_scheduler *sched) in drm_sched_ready() argument
765 return atomic_read(&sched->hw_rq_count) < in drm_sched_ready()
766 sched->hw_submission_limit; in drm_sched_ready()
775 void drm_sched_wakeup(struct drm_gpu_scheduler *sched) in drm_sched_wakeup() argument
777 if (drm_sched_ready(sched)) in drm_sched_wakeup()
778 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_wakeup()
789 drm_sched_select_entity(struct drm_gpu_scheduler *sched) in drm_sched_select_entity() argument
794 if (!drm_sched_ready(sched)) in drm_sched_select_entity()
799 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); in drm_sched_select_entity()
816 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) in drm_sched_get_cleanup_job() argument
820 spin_lock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
822 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
830 cancel_delayed_work(&sched->work_tdr); in drm_sched_get_cleanup_job()
832 next = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
839 drm_sched_start_timeout(sched); in drm_sched_get_cleanup_job()
845 spin_unlock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
862 struct drm_gpu_scheduler *sched, *picked_sched = NULL; in drm_sched_pick_best() local
867 sched = sched_list[i]; in drm_sched_pick_best()
869 if (!sched->ready) { in drm_sched_pick_best()
871 sched->name); in drm_sched_pick_best()
875 num_score = atomic_read(sched->score); in drm_sched_pick_best()
878 picked_sched = sched; in drm_sched_pick_best()
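
drm_sched_pick_best() backs the entity-side load balancing: an entity initialized with a list of schedulers is steered to the ready instance with the lowest score. A hedged sketch of the caller side; the ring layout and names are assumptions.

	/* Hypothetical: let one entity load-balance across two rings; the core
	 * compares the per-scheduler score counters when selecting a run queue. */
	struct drm_gpu_scheduler *sched_list[] = {
		&foo->ring[0].sched,
		&foo->ring[1].sched,
	};
	int ret;

	ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				    sched_list, ARRAY_SIZE(sched_list), NULL);
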
893 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) in drm_sched_blocked() argument
912 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; in drm_sched_main() local
924 wait_event_interruptible(sched->wake_up_worker, in drm_sched_main()
925 (cleanup_job = drm_sched_get_cleanup_job(sched)) || in drm_sched_main()
926 (!drm_sched_blocked(sched) && in drm_sched_main()
927 (entity = drm_sched_select_entity(sched))) || in drm_sched_main()
931 sched->ops->free_job(cleanup_job); in drm_sched_main()
945 atomic_inc(&sched->hw_rq_count); in drm_sched_main()
949 fence = sched->ops->run_job(sched_job); in drm_sched_main()
970 wake_up(&sched->job_scheduled); in drm_sched_main()
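
The main loop above hands each selected job to the backend through sched->ops->run_job() and expects back the hardware fence it should track; once that fence signals, drm_sched_job_done() drops hw_rq_count and the score and wakes the worker again. A hedged sketch of such a callback; the foo_* helpers are hypothetical.

	/* Hypothetical run_job backend: queue the job on the hardware ring and
	 * return the fence that signals when the hardware has finished it. */
	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
	{
		struct foo_job *job = to_foo_job(sched_job);	/* hypothetical wrapper */

		/* May return an ERR_PTR, which drm_sched_main() reports as an error. */
		return foo_ring_submit(job);			/* hypothetical HW submit */
	}
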
990 int drm_sched_init(struct drm_gpu_scheduler *sched, in drm_sched_init() argument
997 sched->ops = ops; in drm_sched_init()
998 sched->hw_submission_limit = hw_submission; in drm_sched_init()
999 sched->name = name; in drm_sched_init()
1000 sched->timeout = timeout; in drm_sched_init()
1001 sched->timeout_wq = timeout_wq ? : system_wq; in drm_sched_init()
1002 sched->hang_limit = hang_limit; in drm_sched_init()
1003 sched->score = score ? score : &sched->_score; in drm_sched_init()
1005 drm_sched_rq_init(sched, &sched->sched_rq[i]); in drm_sched_init()
1007 init_waitqueue_head(&sched->wake_up_worker); in drm_sched_init()
1008 init_waitqueue_head(&sched->job_scheduled); in drm_sched_init()
1009 INIT_LIST_HEAD(&sched->pending_list); in drm_sched_init()
1010 spin_lock_init(&sched->job_list_lock); in drm_sched_init()
1011 atomic_set(&sched->hw_rq_count, 0); in drm_sched_init()
1012 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); in drm_sched_init()
1013 atomic_set(&sched->_score, 0); in drm_sched_init()
1014 atomic64_set(&sched->job_id_count, 0); in drm_sched_init()
1017 sched->thread = kthread_run(drm_sched_main, sched, sched->name); in drm_sched_init()
1018 if (IS_ERR(sched->thread)) { in drm_sched_init()
1019 ret = PTR_ERR(sched->thread); in drm_sched_init()
1020 sched->thread = NULL; in drm_sched_init()
1025 sched->ready = true; in drm_sched_init()
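
drm_sched_init() above is the driver-facing setup: one scheduler instance per hardware ring, fed with the backend ops whose run_job/timedout_job/free_job callbacks are invoked by the code listed earlier, plus the queue depth, hang limit, timeout and optional dedicated timeout workqueue and shared score counter. A hedged sketch of a caller follows; the foo_* names are assumptions, and the exact drm_sched_init() parameter list has changed across kernel versions, so treat the call as an approximation for the era of this listing. Teardown is the matching drm_sched_fini() shown below.

	/* Hypothetical backend ops table; these are the callbacks the listed
	 * scheduler code invokes via sched->ops. */
	static const struct drm_sched_backend_ops foo_sched_ops = {
		.run_job	= foo_run_job,
		.timedout_job	= foo_timedout_job,
		.free_job	= foo_free_job,		/* hypothetical */
	};

	ret = drm_sched_init(&foo->sched, &foo_sched_ops,
			     64,			/* hw_submission: ring depth */
			     1,				/* hang_limit */
			     msecs_to_jiffies(500),	/* timeout */
			     NULL,			/* timeout_wq: fall back to system_wq */
			     NULL,			/* score: use the internal _score */
			     "foo_ring");		/* name, also the kthread name */
	if (ret)
		return ret;
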
1037 void drm_sched_fini(struct drm_gpu_scheduler *sched) in drm_sched_fini() argument
1042 if (sched->thread) in drm_sched_fini()
1043 kthread_stop(sched->thread); in drm_sched_fini()
1046 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_fini()
1064 wake_up_all(&sched->job_scheduled); in drm_sched_fini()
1067 cancel_delayed_work_sync(&sched->work_tdr); in drm_sched_fini()
1069 sched->ready = false; in drm_sched_fini()
1085 struct drm_gpu_scheduler *sched = bad->sched; in drm_sched_increase_karma_ext() local
1099 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_increase_karma_ext()