Lines matching refs: wa_ctx
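All of these matches appear to come from the i915 GVT-g workload scheduler; wa_ctx is the shadow copy GVT keeps of a guest's workaround context, i.e. the INDIRECT_CTX and BB_PER_CTX_PTR batch buffers the guest hands to the render engine. On each line, the leading number is the source line in the scanned file, the trailing "in func()" names the enclosing function, and "argument" flags definitions where wa_ctx is a parameter. Short reconstructed sketches are interleaved below each cluster of matches; they are assembled from the fragments shown plus common i915/GVT conventions, not copied from any particular tree.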
410 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) in release_shadow_wa_ctx() argument
412 if (!wa_ctx->indirect_ctx.obj) in release_shadow_wa_ctx()
415 i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL); in release_shadow_wa_ctx()
416 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); in release_shadow_wa_ctx()
417 i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); in release_shadow_wa_ctx()
418 i915_gem_object_put(wa_ctx->indirect_ctx.obj); in release_shadow_wa_ctx()
420 wa_ctx->indirect_ctx.obj = NULL; in release_shadow_wa_ctx()
421 wa_ctx->indirect_ctx.shadow_va = NULL; in release_shadow_wa_ctx()
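Matches 410-421 cover almost the whole of release_shadow_wa_ctx(). A sketch, with the early return inferred from the NULL check at 412 (assumes the usual i915 GEM headers):

static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        /* Nothing was shadowed for this workload. */
        if (!wa_ctx->indirect_ctx.obj)
                return;

        /* Drop the mapping taken when the indirect context was shadowed,
         * then drop our reference on the GEM object itself.
         */
        i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
        i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
        i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
        i915_gem_object_put(wa_ctx->indirect_ctx.obj);

        wa_ctx->indirect_ctx.obj = NULL;
        wa_ctx->indirect_ctx.shadow_va = NULL;
}

Clearing obj and shadow_va at the end makes the release idempotent, which matters because, as the later matches show, this function is reached from several error and teardown paths.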
504 workload->wa_ctx.indirect_ctx.size) { in intel_gvt_scan_and_shadow_workload()
505 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
514 release_shadow_wa_ctx(&workload->wa_ctx); in intel_gvt_scan_and_shadow_workload()
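Matches 504-514 are the scan/shadow entry point inside intel_gvt_scan_and_shadow_workload(). A sketch of that region; the RCS0 guard and the error-label name are assumptions (the indirect-context workaround only exists on the render engine):

        /* Fragment of intel_gvt_scan_and_shadow_workload(); 'ret' and
         * 'workload' come from the enclosing function.
         */
        if (workload->engine->id == RCS0 &&
            workload->wa_ctx.indirect_ctx.size) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_shadow;        /* hypothetical label */
        }

        return 0;

err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;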
589 static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) in update_wa_ctx_2_shadow_ctx() argument
592 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); in update_wa_ctx_2_shadow_ctx()
599 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma; in update_wa_ctx_2_shadow_ctx()
602 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma; in update_wa_ctx_2_shadow_ctx()
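Matches 589-602 show update_wa_ctx_2_shadow_ctx() recovering the owning workload via container_of() and rewriting two ring-context registers so the hardware fetches the shadow copies instead of the guest's. The register names (bb_per_ctx_ptr, rcs_indirect_ctx) and the path to lrc_reg_state are not in the matches; they are filled in from the GVT execlist ring-context layout as an assumption:

static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        struct intel_vgpu_workload *workload =
                container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
        struct i915_request *rq = workload->req;
        struct execlist_ring_context *shadow_ring_context =
                (struct execlist_ring_context *)rq->context->lrc_reg_state;

        /* Preserve the low control bits, substitute the shadow GGTT
         * addresses (the masked ORs at 599 and 602).
         */
        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
                 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
}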
605 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) in prepare_shadow_wa_ctx() argument
609 (unsigned char *)wa_ctx->indirect_ctx.shadow_va + in prepare_shadow_wa_ctx()
610 wa_ctx->indirect_ctx.size; in prepare_shadow_wa_ctx()
614 if (wa_ctx->indirect_ctx.size == 0) in prepare_shadow_wa_ctx()
619 i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww); in prepare_shadow_wa_ctx()
621 vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL, in prepare_shadow_wa_ctx()
640 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma); in prepare_shadow_wa_ctx()
642 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1); in prepare_shadow_wa_ctx()
645 update_wa_ctx_2_shadow_ctx(wa_ctx); in prepare_shadow_wa_ctx()
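Matches 605-645 outline prepare_shadow_wa_ctx(): pin the shadow object into the GGTT, record the offsets, then patch the shadow ring context via update_wa_ctx_2_shadow_ctx(). The ww-lock retry loop and the CACHELINE_BYTES alignment are filled in from the usual i915 pin-with-ww pattern, so read this as a sketch:

static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
        /* The per-ctx pointer sits directly after the shadowed
         * indirect context (609-610).
         */
        unsigned char *per_ctx_va =
                (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
                wa_ctx->indirect_ctx.size;
        struct i915_gem_ww_ctx ww;
        struct i915_vma *vma;
        int ret;

        /* The guest submitted no indirect context: nothing to pin. */
        if (wa_ctx->indirect_ctx.size == 0)
                return 0;

        i915_gem_ww_ctx_init(&ww, false);
retry:
        i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);

        vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww,
                                          NULL, 0, CACHELINE_BYTES, 0);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                if (ret == -EDEADLK) {
                        ret = i915_gem_ww_ctx_backoff(&ww);
                        if (!ret)
                                goto retry;
                }
                i915_gem_ww_ctx_fini(&ww);
                return ret;
        }
        i915_gem_ww_ctx_fini(&ww);

        /* GGTT offset of the shadow copy (640), plus the per-ctx address
         * read from one dword past the end of the indirect context (642).
         */
        wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
        wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);

        update_wa_ctx_2_shadow_ctx(wa_ctx);
        return 0;
}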
775 ret = prepare_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
789 release_shadow_wa_ctx(&workload->wa_ctx); in prepare_workload()
818 release_shadow_wa_ctx(&workload->wa_ctx); in dispatch_workload()
1521 release_shadow_wa_ctx(&workload->wa_ctx); in intel_vgpu_destroy_workload()
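The remaining call sites pair the two halves of the lifecycle: prepare_workload() pins the shadow at 775 and backs it out at 789 if a later step fails, dispatch_workload() apparently releases it on its cleanup path at 818, and intel_vgpu_destroy_workload() releases it unconditionally at 1521. Condensed, with hypothetical label names:

        /* prepare_workload(), around 775/789 */
        ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
        if (ret)
                goto err_shadow_wa_ctx;         /* hypothetical label */

        /* ... later preparation steps that may fail ... */

err_shadow_wa_ctx:
        release_shadow_wa_ctx(&workload->wa_ctx);

Because release_shadow_wa_ctx() NULLs its pointers, the dispatch-failure release at 818 and the final release at 1521 can both run against the same workload without a double free.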
1707 workload->wa_ctx.indirect_ctx.guest_gma = in intel_vgpu_create_workload()
1709 workload->wa_ctx.indirect_ctx.size = in intel_vgpu_create_workload()
1713 if (workload->wa_ctx.indirect_ctx.size != 0) { in intel_vgpu_create_workload()
1715 workload->wa_ctx.indirect_ctx.guest_gma, in intel_vgpu_create_workload()
1716 workload->wa_ctx.indirect_ctx.size)) { in intel_vgpu_create_workload()
1718 workload->wa_ctx.indirect_ctx.guest_gma); in intel_vgpu_create_workload()
1724 workload->wa_ctx.per_ctx.guest_gma = in intel_vgpu_create_workload()
1726 workload->wa_ctx.per_ctx.valid = per_ctx & 1; in intel_vgpu_create_workload()
1727 if (workload->wa_ctx.per_ctx.valid) { in intel_vgpu_create_workload()
1729 workload->wa_ctx.per_ctx.guest_gma, in intel_vgpu_create_workload()
1732 workload->wa_ctx.per_ctx.guest_gma); in intel_vgpu_create_workload()
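Matches 1707-1732 are where intel_vgpu_create_workload() first populates wa_ctx, decoding the guest's INDIRECT_CTX and BB_PER_CTX_PTR register values and range-checking the guest graphics memory addresses before anything is shadowed. A sketch; INDIRECT_CTX_SIZE_MASK, intel_gvt_ggtt_validate_range() as the checker, and the error handling are inferred from the fragments and common GVT usage rather than visible in the matches:

        /* Fragment of intel_vgpu_create_workload(); 'indirect_ctx' and
         * 'per_ctx' hold register values read from the guest's ring
         * context and are therefore guest-controlled.
         */
        workload->wa_ctx.indirect_ctx.guest_gma =
                indirect_ctx & INDIRECT_CTX_ADDR_MASK;
        workload->wa_ctx.indirect_ctx.size =
                (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                CACHELINE_BYTES;        /* size field is in cachelines */

        if (workload->wa_ctx.indirect_ctx.size != 0) {
                if (!intel_gvt_ggtt_validate_range(vgpu,
                        workload->wa_ctx.indirect_ctx.guest_gma,
                        workload->wa_ctx.indirect_ctx.size)) {
                        gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
                                workload->wa_ctx.indirect_ctx.guest_gma);
                        ret = -EINVAL;
                        goto err_free_workload; /* hypothetical label */
                }
        }

        /* Bit 0 of BB_PER_CTX_PTR is the valid bit; the rest is the
         * guest address of the per-ctx batch.
         */
        workload->wa_ctx.per_ctx.guest_gma =
                per_ctx & PER_CTX_ADDR_MASK;
        workload->wa_ctx.per_ctx.valid = per_ctx & 1;
        if (workload->wa_ctx.per_ctx.valid) {
                if (!intel_gvt_ggtt_validate_range(vgpu,
                        workload->wa_ctx.per_ctx.guest_gma,
                        CACHELINE_BYTES)) {
                        gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
                                workload->wa_ctx.per_ctx.guest_gma);
                        ret = -EINVAL;
                        goto err_free_workload; /* hypothetical label */
                }
        }

The validation matters for isolation: these addresses come straight from guest-writable context state, so they must be checked against the vGPU's GGTT allocation before GVT maps or copies from them.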