Lines matching references to cs
150 void cs_get(struct hl_cs *cs) in cs_get() argument
152 kref_get(&cs->refcount); in cs_get()
155 static int cs_get_unless_zero(struct hl_cs *cs) in cs_get_unless_zero() argument
157 return kref_get_unless_zero(&cs->refcount); in cs_get_unless_zero()
160 static void cs_put(struct hl_cs *cs) in cs_put() argument
162 kref_put(&cs->refcount, cs_do_release); in cs_put()
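The three helpers referenced above (lines 150-162) manage the command-submission object's lifetime with a kref embedded in struct hl_cs. The following is a minimal sketch of that same pattern using the standard kref API; the demo_* names and abridged struct are illustrative, not taken from the driver.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_cs {
    struct kref refcount;
    /* ... payload fields ... */
};

static void demo_cs_release(struct kref *ref)
{
    /* Recover the containing object from its embedded kref and free it. */
    struct demo_cs *cs = container_of(ref, struct demo_cs, refcount);

    kfree(cs);
}

static void demo_cs_get(struct demo_cs *cs)
{
    kref_get(&cs->refcount);            /* unconditional: caller already holds a reference */
}

static int demo_cs_get_unless_zero(struct demo_cs *cs)
{
    /* Returns 0 if the object is already on its way to release. */
    return kref_get_unless_zero(&cs->refcount);
}

static void demo_cs_put(struct demo_cs *cs)
{
    kref_put(&cs->refcount, demo_cs_release);   /* release callback runs when the count hits 0 */
}

The release callback plays the role cs_do_release() plays in the listing (lines 597-710).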
177 bool cs_needs_completion(struct hl_cs *cs) in cs_needs_completion() argument
182 if (cs->staged_cs && !cs->staged_last) in cs_needs_completion()
188 bool cs_needs_timeout(struct hl_cs *cs) in cs_needs_timeout() argument
193 if (cs->staged_cs && !cs->staged_first) in cs_needs_timeout()
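The two predicates at lines 177-193 gate completion and timeout handling for staged submissions: only the last CS of a staged chain raises a user-visible completion, and only the first arms the TDR timeout. A hedged reconstruction of that logic, with the surrounding struct abridged:

#include <linux/types.h>

struct demo_cs2 {
    bool staged_cs;     /* CS is part of a staged submission */
    bool staged_first;  /* first CS in the staged chain      */
    bool staged_last;   /* last CS in the staged chain       */
};

static bool demo_cs_needs_completion(const struct demo_cs2 *cs)
{
    /* Only the last CS of a staged chain signals completion. */
    if (cs->staged_cs && !cs->staged_last)
        return false;
    return true;
}

static bool demo_cs_needs_timeout(const struct demo_cs2 *cs)
{
    /* Only the first CS of a staged chain arms the TDR timeout. */
    if (cs->staged_cs && !cs->staged_first)
        return false;
    return true;
}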
228 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
229 parser.cs_sequence = job->cs->sequence; in cs_parser()
240 parser.completion = cs_needs_completion(job->cs); in cs_parser()
269 struct hl_cs *cs = job->cs; in complete_job() local
300 spin_lock(&cs->job_lock); in complete_job()
302 spin_unlock(&cs->job_lock); in complete_job()
318 if (cs_needs_completion(cs) && in complete_job()
321 cs_put(cs); in complete_job()
338 struct hl_cs *cs; in hl_staged_cs_find_first() local
340 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node) in hl_staged_cs_find_first()
341 if (cs->staged_cs && cs->staged_first && in hl_staged_cs_find_first()
342 cs->sequence == cs_seq) in hl_staged_cs_find_first()
343 return cs; in hl_staged_cs_find_first()
355 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs) in is_staged_cs_last_exists() argument
359 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs, in is_staged_cs_last_exists()
378 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_get() argument
385 if (!cs->staged_last) in staged_cs_get()
386 cs_get(cs); in staged_cs_get()
397 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_put() argument
402 if (!cs_needs_completion(cs)) in staged_cs_put()
403 cs_put(cs); in staged_cs_put()
406 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) in cs_handle_tdr() argument
411 if (!cs_needs_timeout(cs)) in cs_handle_tdr()
424 if (cs->staged_cs && cs->staged_last) { in cs_handle_tdr()
425 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); in cs_handle_tdr()
427 cs = first_cs; in cs_handle_tdr()
435 if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT) in cs_handle_tdr()
438 if (cs->tdr_active) in cs_handle_tdr()
439 cancel_delayed_work_sync(&cs->work_tdr); in cs_handle_tdr()
507 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs) in complete_multi_cs() argument
509 struct hl_fence *fence = cs->fence; in complete_multi_cs()
513 if (cs->staged_cs && !cs->staged_first) in complete_multi_cs()
557 struct hl_cs *cs, in cs_release_sob_reset_handler() argument
564 if (!hl_cs_cmpl->hw_sob || !cs->submitted) in cs_release_sob_reset_handler()
597 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); in cs_do_release() local
598 struct hl_device *hdev = cs->ctx->hdev; in cs_do_release()
601 container_of(cs->fence, struct hl_cs_compl, base_fence); in cs_do_release()
603 cs->completed = true; in cs_do_release()
613 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_do_release()
616 if (!cs->submitted) { in cs_do_release()
622 if (cs->type == CS_TYPE_WAIT || in cs_do_release()
623 cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
624 hl_fence_put(cs->signal_fence); in cs_do_release()
630 hl_hw_queue_update_ci(cs); in cs_do_release()
634 list_del_init(&cs->mirror_node); in cs_do_release()
637 cs_handle_tdr(hdev, cs); in cs_do_release()
639 if (cs->staged_cs) { in cs_do_release()
643 if (cs->staged_last) { in cs_do_release()
647 &cs->staged_cs_node, staged_cs_node) in cs_do_release()
655 if (cs->submitted) { in cs_do_release()
657 list_del(&cs->staged_cs_node); in cs_do_release()
669 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
670 && cs->encaps_signals) in cs_do_release()
671 kref_put(&cs->encaps_sig_hdl->refcount, in cs_do_release()
678 hl_debugfs_remove_cs(cs); in cs_do_release()
680 hl_ctx_put(cs->ctx); in cs_do_release()
686 if (cs->timedout) in cs_do_release()
687 cs->fence->error = -ETIMEDOUT; in cs_do_release()
688 else if (cs->aborted) in cs_do_release()
689 cs->fence->error = -EIO; in cs_do_release()
690 else if (!cs->submitted) in cs_do_release()
691 cs->fence->error = -EBUSY; in cs_do_release()
693 if (unlikely(cs->skip_reset_on_timeout)) { in cs_do_release()
696 cs->sequence, in cs_do_release()
697 div_u64(jiffies - cs->submission_time_jiffies, HZ)); in cs_do_release()
700 if (cs->timestamp) in cs_do_release()
701 cs->fence->timestamp = ktime_get(); in cs_do_release()
702 complete_all(&cs->fence->completion); in cs_do_release()
703 complete_multi_cs(hdev, cs); in cs_do_release()
705 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl); in cs_do_release()
707 hl_fence_put(cs->fence); in cs_do_release()
709 kfree(cs->jobs_in_queue_cnt); in cs_do_release()
710 kfree(cs); in cs_do_release()
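The tail of cs_do_release() (lines 686-703 above) picks the fence error from the CS state flags, optionally timestamps the fence, and wakes all waiters. A sketch of that step, assuming an abridged fence type with only the fields the listing shows:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>
#include <linux/types.h>

struct demo_fence {
    struct completion completion;
    ktime_t timestamp;
    int error;
};

static void demo_finalize_fence(struct demo_fence *fence, bool timedout,
                                bool aborted, bool submitted, bool want_ts)
{
    if (timedout)
        fence->error = -ETIMEDOUT;   /* CS exceeded its timeout           */
    else if (aborted)
        fence->error = -EIO;         /* CS torn down (e.g. device reset)  */
    else if (!submitted)
        fence->error = -EBUSY;       /* CS never reached the HW queues    */

    if (want_ts)
        fence->timestamp = ktime_get();

    complete_all(&fence->completion);    /* wake every waiter on this fence */
}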
717 struct hl_cs *cs = container_of(work, struct hl_cs, in cs_timedout() local
719 bool skip_reset_on_timeout = cs->skip_reset_on_timeout; in cs_timedout()
721 rc = cs_get_unless_zero(cs); in cs_timedout()
725 if ((!cs->submitted) || (cs->completed)) { in cs_timedout()
726 cs_put(cs); in cs_timedout()
732 cs->timedout = true; in cs_timedout()
734 hdev = cs->ctx->hdev; in cs_timedout()
736 switch (cs->type) { in cs_timedout()
740 cs->sequence); in cs_timedout()
746 cs->sequence); in cs_timedout()
752 cs->sequence); in cs_timedout()
758 cs->sequence); in cs_timedout()
766 cs_put(cs); in cs_timedout()
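cs_timedout() above is the delayed-work TDR handler: it re-derives the CS from the embedded work item, pins it with a conditional reference, and bails out if the CS never ran or already completed. A hedged sketch of that pattern; the demo_* names and the abridged struct are assumptions, and the actual reporting/reset logic is elided.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_cs3 {
    struct kref refcount;
    struct delayed_work work_tdr;
    bool submitted;
    bool completed;
    bool timedout;
};

static void demo_cs3_release(struct kref *ref)
{
    kfree(container_of(ref, struct demo_cs3, refcount));
}

static void demo_cs_timedout(struct work_struct *work)
{
    struct delayed_work *dwork = to_delayed_work(work);
    struct demo_cs3 *cs = container_of(dwork, struct demo_cs3, work_tdr);

    /* Skip the whole handler if the CS is already being released. */
    if (!kref_get_unless_zero(&cs->refcount))
        return;

    /* A CS that never ran, or that already completed, is not a timeout. */
    if (!cs->submitted || cs->completed) {
        kref_put(&cs->refcount, demo_cs3_release);
        return;
    }

    cs->timedout = true;
    /* ... device-specific timeout reporting / reset would go here ... */
    kref_put(&cs->refcount, demo_cs3_release);
}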
783 struct hl_cs *cs; in allocate_cs() local
788 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); in allocate_cs()
789 if (!cs) in allocate_cs()
790 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in allocate_cs()
792 if (!cs) { in allocate_cs()
801 cs->ctx = ctx; in allocate_cs()
802 cs->submitted = false; in allocate_cs()
803 cs->completed = false; in allocate_cs()
804 cs->type = cs_type; in allocate_cs()
805 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP); in allocate_cs()
806 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); in allocate_cs()
807 cs->timeout_jiffies = timeout; in allocate_cs()
808 cs->skip_reset_on_timeout = in allocate_cs()
811 cs->submission_time_jiffies = jiffies; in allocate_cs()
812 INIT_LIST_HEAD(&cs->job_list); in allocate_cs()
813 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); in allocate_cs()
814 kref_init(&cs->refcount); in allocate_cs()
815 spin_lock_init(&cs->job_lock); in allocate_cs()
828 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
829 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC); in allocate_cs()
830 if (!cs->jobs_in_queue_cnt) in allocate_cs()
831 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
832 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL); in allocate_cs()
834 if (!cs->jobs_in_queue_cnt) { in allocate_cs()
842 cs_cmpl->type = cs->type; in allocate_cs()
844 cs->fence = &cs_cmpl->base_fence; in allocate_cs()
876 cs->sequence = cs_cmpl->cs_seq; in allocate_cs()
889 *cs_new = cs; in allocate_cs()
895 kfree(cs->jobs_in_queue_cnt); in allocate_cs()
899 kfree(cs); in allocate_cs()
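allocate_cs() (lines 788-899 above) tries a non-blocking allocation first and only falls back to a sleeping one, then initializes the embedded refcount, job list, TDR work and lock. A minimal sketch of that allocation pattern; the demo_* names are placeholders and the struct is heavily abridged.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_cs4 {
    struct kref refcount;
    struct list_head job_list;
    struct delayed_work work_tdr;
    spinlock_t job_lock;
};

/* Stand-in for the real TDR handler (cs_timedout() in the listing). */
static void demo_cs4_timedout(struct work_struct *work)
{
}

static struct demo_cs4 *demo_allocate_cs(void)
{
    struct demo_cs4 *cs;

    /* Non-blocking attempt first; retry with GFP_KERNEL only if the
     * atomic pools are exhausted, mirroring lines 788-790 above. */
    cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
    if (!cs)
        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
    if (!cs)
        return NULL;

    kref_init(&cs->refcount);
    INIT_LIST_HEAD(&cs->job_list);
    INIT_DELAYED_WORK(&cs->work_tdr, demo_cs4_timedout);
    spin_lock_init(&cs->job_lock);

    return cs;
}

The same ATOMIC-then-KERNEL fallback is repeated for the jobs_in_queue_cnt array (lines 828-832).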
904 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) in cs_rollback() argument
908 staged_cs_put(hdev, cs); in cs_rollback()
910 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_rollback()
917 struct hl_cs *cs, *tmp; in hl_cs_rollback_all() local
928 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) { in hl_cs_rollback_all()
929 cs_get(cs); in hl_cs_rollback_all()
930 cs->aborted = true; in hl_cs_rollback_all()
932 cs->ctx->asid, cs->sequence); in hl_cs_rollback_all()
933 cs_rollback(hdev, cs); in hl_cs_rollback_all()
934 cs_put(cs); in hl_cs_rollback_all()
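hl_cs_rollback_all() (lines 917-934 above) walks the device's mirror list and aborts every in-flight CS: pin it, mark it aborted, roll it back, drop the reference. A sketch of that loop under abridged types; the demo_* names are illustrative and the per-job rollback is elided.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cs6 {
    struct kref refcount;
    struct list_head mirror_node;
    bool aborted;
};

static void demo_cs6_release(struct kref *ref)
{
    kfree(container_of(ref, struct demo_cs6, refcount));
}

static void demo_rollback_all(struct list_head *cs_mirror_list)
{
    struct demo_cs6 *cs, *tmp;

    /* _safe iteration: the release path may unlink entries while we walk. */
    list_for_each_entry_safe(cs, tmp, cs_mirror_list, mirror_node) {
        kref_get(&cs->refcount);        /* keep the CS alive across the rollback */
        cs->aborted = true;
        /* ... undo per-job bookkeeping (cs_rollback() in the driver) ... */
        kref_put(&cs->refcount, demo_cs6_release);
    }
}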
983 struct hl_cs *cs = job->cs; in job_wq_completion() local
984 struct hl_device *hdev = cs->ctx->hdev; in job_wq_completion()
1220 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs, in cs_staged_submission() argument
1227 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST); in cs_staged_submission()
1228 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST); in cs_staged_submission()
1230 if (cs->staged_first) { in cs_staged_submission()
1232 INIT_LIST_HEAD(&cs->staged_cs_node); in cs_staged_submission()
1233 cs->staged_sequence = cs->sequence; in cs_staged_submission()
1235 if (cs->encaps_signals) in cs_staged_submission()
1236 cs->encaps_sig_hdl_id = encaps_signal_handle; in cs_staged_submission()
1241 cs->staged_sequence = sequence; in cs_staged_submission()
1245 staged_cs_get(hdev, cs); in cs_staged_submission()
1247 cs->staged_cs = true; in cs_staged_submission()
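cs_staged_submission() (lines 1220-1247 above) decodes the first/last flags and ties each part of a staged submission to a common staged_sequence: the first CS anchors the chain under its own sequence, later parts inherit the sequence user space passed in. A sketch of that bookkeeping; the flag bit values below are placeholders, not the real uapi constants, and the extra reference taken via staged_cs_get() is only noted in a comment.

#include <linux/list.h>
#include <linux/types.h>

#define DEMO_CS_FLAG_STAGED_FIRST   0x1    /* placeholder bit, not the uapi value */
#define DEMO_CS_FLAG_STAGED_LAST    0x2    /* placeholder bit, not the uapi value */

struct demo_cs5 {
    struct list_head staged_cs_node;
    u64 sequence;
    u64 staged_sequence;
    bool staged_cs;
    bool staged_first;
    bool staged_last;
};

static void demo_cs_staged_submission(struct demo_cs5 *cs, u64 user_sequence,
                                      u32 flags)
{
    cs->staged_last = !!(flags & DEMO_CS_FLAG_STAGED_LAST);
    cs->staged_first = !!(flags & DEMO_CS_FLAG_STAGED_FIRST);

    if (cs->staged_first) {
        /* The first CS anchors the chain and names it by its own sequence. */
        INIT_LIST_HEAD(&cs->staged_cs_node);
        cs->staged_sequence = cs->sequence;
    } else {
        /* Later parts join the chain named by the sequence user space passed in. */
        cs->staged_sequence = user_sequence;
    }

    /* The driver also takes an extra reference here (staged_cs_get(), line 1245)
     * for every part except the last one. */
    cs->staged_cs = true;
}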
1273 struct hl_cs *cs; in cs_ioctl_default() local
1295 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags, in cs_ioctl_default()
1300 *cs_seq = cs->sequence; in cs_ioctl_default()
1302 hl_debugfs_add_cs(cs); in cs_ioctl_default()
1304 rc = cs_staged_submission(hdev, cs, user_sequence, flags, in cs_ioctl_default()
1312 if (cs->staged_cs) in cs_ioctl_default()
1313 *cs_seq = cs->staged_sequence; in cs_ioctl_default()
1370 job->cs = cs; in cs_ioctl_default()
1375 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_default()
1377 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_default()
1385 if (cs_needs_completion(cs) && in cs_ioctl_default()
1388 cs_get(cs); in cs_ioctl_default()
1398 cs->ctx->asid, cs->sequence, job->id, rc); in cs_ioctl_default()
1406 if (int_queues_only && cs_needs_completion(cs)) { in cs_ioctl_default()
1411 cs->ctx->asid, cs->sequence); in cs_ioctl_default()
1421 cs->fence->stream_master_qid_map = stream_master_qid_map; in cs_ioctl_default()
1423 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_default()
1428 cs->ctx->asid, cs->sequence, rc); in cs_ioctl_default()
1439 cs_rollback(hdev, cs); in cs_ioctl_default()
1444 cs_put(cs); in cs_ioctl_default()
1705 struct hl_ctx *ctx, struct hl_cs *cs, in cs_ioctl_signal_wait_create_jobs() argument
1723 if (cs->type == CS_TYPE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1738 job->cs = cs; in cs_ioctl_signal_wait_create_jobs()
1744 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1745 && cs->encaps_signals) in cs_ioctl_signal_wait_create_jobs()
1758 cs_get(cs); in cs_ioctl_signal_wait_create_jobs()
1760 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_signal_wait_create_jobs()
1762 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_signal_wait_create_jobs()
1967 struct hl_cs *cs; in cs_ioctl_signal_wait() local
2124 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); in cs_ioctl_signal_wait()
2139 cs->signal_fence = sig_fence; in cs_ioctl_signal_wait()
2144 if (cs->encaps_signals) in cs_ioctl_signal_wait()
2145 cs->encaps_sig_hdl = encaps_sig_hdl; in cs_ioctl_signal_wait()
2148 hl_debugfs_add_cs(cs); in cs_ioctl_signal_wait()
2150 *cs_seq = cs->sequence; in cs_ioctl_signal_wait()
2153 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, in cs_ioctl_signal_wait()
2157 cs, q_idx, collective_engine_id, in cs_ioctl_signal_wait()
2168 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_signal_wait()
2179 ctx->asid, cs->sequence, rc); in cs_ioctl_signal_wait()
2189 cs_rollback(hdev, cs); in cs_ioctl_signal_wait()
2194 cs_put(cs); in cs_ioctl_signal_wait()