/linux/security/selinux/
status.c
    48  if (!state->status_page) {  in selinux_kernel_status_page()
    49  state->status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);  in selinux_kernel_status_page()
    51  if (state->status_page) {  in selinux_kernel_status_page()
    52  status = page_address(state->status_page);  in selinux_kernel_status_page()
    68  result = state->status_page;  in selinux_kernel_status_page()
    85  if (state->status_page) {  in selinux_status_update_setenforce()
    86  status = page_address(state->status_page);  in selinux_status_update_setenforce()
   111  if (state->status_page) {  in selinux_status_update_policyload()
   112  status = page_address(state->status_page);  in selinux_status_update_policyload()

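The status.c hits above are the kernel side of the SELinux status page: one page, lazily allocated with alloc_page(GFP_KERNEL|__GFP_ZERO) and rewritten whenever the enforcing mode flips or a policy is (re)loaded. Userspace maps the same page read-only through the selinuxfs "status" node, which is how libselinux learns about policy reloads without a syscall per check. The user-space sketch below shows that read side; the /sys/fs/selinux/status path and the field layout (mirroring struct selinux_kernel_status) are stated from memory rather than taken from this listing, so verify them against your kernel headers.

/*
 * Sketch: read the SELinux kernel status page from userspace.
 * Assumptions (not taken from the listing above): the selinuxfs mount
 * point /sys/fs/selinux and the field layout of
 * struct selinux_kernel_status.  Check both against your kernel.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct selinux_kernel_status {
	uint32_t version;	/* layout version of the page */
	uint32_t sequence;	/* seqlock-style counter, odd while the kernel updates */
	uint32_t enforcing;	/* 0 = permissive, 1 = enforcing */
	uint32_t policyload;	/* incremented on every policy (re)load */
	uint32_t deny_unknown;	/* behaviour for classes unknown to the policy */
};

int main(void)
{
	int fd = open("/sys/fs/selinux/status", O_RDONLY);
	if (fd < 0) {
		perror("open /sys/fs/selinux/status");
		return 1;
	}

	const volatile struct selinux_kernel_status *s =
		mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (s == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	uint32_t seq, enforcing, policyload;
	for (;;) {
		seq = s->sequence;
		if (seq & 1)
			continue;	/* kernel mid-update, try again */
		enforcing = s->enforcing;
		policyload = s->policyload;
		if (seq == s->sequence)
			break;		/* no update raced our reads */
	}

	printf("enforcing=%u policyload=%u\n",
	       (unsigned)enforcing, (unsigned)policyload);
	close(fd);
	return 0;
}

The update functions listed above (selinux_status_update_setenforce(), selinux_status_update_policyload()) bump sequence to an odd value before patching the other fields and back to an even value afterwards, which is why the reader retries while sequence is odd or has changed.
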
/linux/drivers/gpu/drm/i915/gt/
intel_engine.h
   136  return READ_ONCE(engine->status_page.addr[reg]);  in intel_read_status_page()
   149  clflush(&engine->status_page.addr[reg]);  in intel_write_status_page()
   150  engine->status_page.addr[reg] = value;  in intel_write_status_page()
   151  clflush(&engine->status_page.addr[reg]);  in intel_write_status_page()
   154  WRITE_ONCE(engine->status_page.addr[reg], value);  in intel_write_status_page()

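These are the two helpers i915 uses for every hardware status page (HWSP) access: reads go through READ_ONCE() because the GPU updates the page behind the CPU's back, and writes either use WRITE_ONCE() or are bracketed by clflush() when the mapping is not cache-coherent. A condensed paraphrase of the pattern follows; needs_clflush() is a hypothetical stand-in for the driver's real coherency test, and the rest mirrors the lines above.

/* Condensed paraphrase of the accessors listed above (intel_engine.h).
 * needs_clflush() is a hypothetical stand-in for the driver's real
 * "is the HWSP mapping non-coherent?" test. */
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* The GPU writes these dwords asynchronously; never cache them. */
	return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	if (needs_clflush(engine)) {
		/* Non-coherent mapping: evict the stale line, store,
		 * then flush the new value out to memory for the GPU. */
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}
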
intel_ring_submission.c
    54  static struct page *status_page(struct intel_engine_cs *engine)  in status_page()  [function]
    56  struct drm_i915_gem_object *obj = engine->status_page.vma->obj;  in status_page()
    64  set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));  in ring_setup_phys_status_page()
   129  set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));  in ring_setup_status_page()
   269  list_for_each_entry(tl, &engine->status_page.timelines, engine_link)  in sanitize_hwsp()
   285  memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);  in xcs_sanitize()
   295  drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);  in xcs_sanitize()
  1372  GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);  in intel_ring_submission_setup()

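Two things stand out in the ring-submission hits: the status page is handed to the hardware either as a raw physical address (hence PFN_PHYS(page_to_pfn(...)) on line 64, for the oldest parts) or as a GGTT offset (line 129), and across suspend/resume the whole page is poisoned and flushed so stale seqnos cannot be mistaken for live ones. The same poison-and-flush step reappears in the execlists and GuC backends later in this listing; a stand-alone sketch of it, with an invented helper name:

/* Sketch of the shared sanitize step (see xcs_sanitize(),
 * execlists_sanitize() and guc_sanitize() in this listing).  The
 * helper name is invented; the two calls mirror the listed lines. */
static void sanitize_status_page(struct intel_engine_cs *engine)
{
	/* Make any stale contents obviously bogus ... */
	memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
	/* ... and push the poison out of the CPU caches so the next
	 * reader cannot see pre-suspend data by accident. */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
}
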
selftest_lrc.c
    76  i915_ggtt_offset(ce->engine->status_page.vma) +  in emit_semaphore_signal()
   525  i915_ggtt_offset(ce->engine->status_page.vma) +  in __gpr_read()
   580  u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);  in __live_lrc_gpr()
   702  i915_ggtt_offset(ce->engine->status_page.vma) +  in create_timestamp()
   761  u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);  in __lrc_timestamp()
  1050  *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +  in record_registers()
  1170  *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +  in poison_registers()
  1297  u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);  in __lrc_isolation()

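The selftests reuse a spare region of the HWSP as scratch: memset32(status_page.addr + 1000, ...) yields the CPU pointer, and i915_ggtt_offset(status_page.vma) plus the matching byte offset yields the address the GPU writes through MI commands. A small sketch of that pairing; the helper name is invented and dword index 1000 simply follows the listing:

/* Hypothetical helper capturing the scratch-slot idiom used by the
 * selftests above: one location, two views. */
static u32 *hwsp_scratch_slot(struct intel_engine_cs *engine, u32 *ggtt_addr)
{
	/* CPU view: dword index 1000 of the status page, zeroed first. */
	u32 *cpu = memset32(engine->status_page.addr + 1000, 0, 4);

	/* GPU view: the same dwords, expressed as a GGTT address for
	 * use in MI_STORE_DWORD_IMM and friends (1000 dwords = byte
	 * offset 4000). */
	*ggtt_addr = i915_ggtt_offset(engine->status_page.vma) +
		     1000 * sizeof(u32);

	return cpu;
}
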
selftest_engine_pm.c
    75  u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);  in __measure_timestamps()
    76  u32 offset = i915_ggtt_offset(engine->status_page.vma);  in __measure_timestamps()

gen6_engine_cs.c
   376  GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);  in gen6_emit_breadcrumb_xcs()
   396  GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);  in gen7_emit_breadcrumb_xcs()

intel_engine_cs.c
   708  vma = fetch_and_zero(&engine->status_page.vma);  in cleanup_status_page()
   752  INIT_LIST_HEAD(&engine->status_page.timelines);  in init_status_page()
   790  engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);  in init_status_page()
   791  engine->status_page.vma = vma;  in init_status_page()
   952  struct i915_vma *hwsp = engine->status_page.vma;  in intel_engine_destroy_pinned_context()
  1603  &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];  in intel_engine_print_registers()
  1911  hexdump(m, engine->status_page.addr, PAGE_SIZE);  in intel_engine_dump()

intel_timeline.c
   167  struct i915_vma *hwsp = engine->status_page.vma;  in intel_timeline_create_from_engine()
   176  list_add_tail(&tl->engine_link, &engine->status_page.timelines);  in intel_timeline_create_from_engine()

mock_engine.c
   345  engine->base.status_page.addr = (void *)(engine + 1);  in mock_engine()
   399  engine->status_page.vma = ce->timeline->hwsp_ggtt;  in mock_engine_init()

intel_engine_pm.c
   183  GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);  in switch_to_kernel_context()

gen2_engine_cs.c
   145  GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);  in __gen2_emit_breadcrumb()

intel_execlists_submission.c
   242  engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;  in ring_set_paused()
  2760  list_for_each_entry(tl, &engine->status_page.timelines, engine_link)  in sanitize_hwsp()
  2778  memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);  in execlists_sanitize()
  2790  clflush_cache_range(engine->status_page.addr, PAGE_SIZE);  in execlists_sanitize()
  2856  i915_ggtt_offset(engine->status_page.vma));  in enable_execlists()
  3428  (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];  in intel_execlists_submission_setup()
  3431  &engine->status_page.addr[intel_hws_csb_write_index(i915)];  in intel_execlists_submission_setup()

intel_engine_types.h
   377  struct intel_hw_status_page status_page;  [member]

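Everything in this listing hangs off this one member: .addr is the CPU mapping the accessors index by dword, .vma is the pinned GGTT binding handed to the hardware, and .timelines collects the timelines whose seqno slots live in the page. A reconstruction of the structure as those usages imply it; the exact member order and any fields not exercised here are assumptions, not a copy of intel_engine_types.h:

/* Reconstructed from the usages above, not copied from the header. */
struct intel_hw_status_page {
	struct list_head timelines;	/* intel_timeline.c links per-engine timelines here */
	struct i915_vma *vma;		/* pinned GGTT binding of the backing page */
	u32 *addr;			/* CPU mapping; dword-indexed by the accessors */
};
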
gen8_engine_cs.c
   334  return (i915_ggtt_offset(engine->status_page.vma) +  in preempt_address()

selftest_execlists.c
  1055  i915_ggtt_offset(ce->engine->status_page.vma) +  in create_rewinder()
  1154  slot = memset32(engine->status_page.addr + 1000, 0, 4);  in live_timeslice_rewind()

/linux/drivers/infiniband/hw/cxgb4/
device.c
   871  rdev->status_page = (struct t4_dev_status_page *)  in c4iw_rdev_open()
   873  if (!rdev->status_page) {  in c4iw_rdev_open()
   877  rdev->status_page->qp_start = rdev->lldi.vr->qp.start;  in c4iw_rdev_open()
   878  rdev->status_page->qp_size = rdev->lldi.vr->qp.size;  in c4iw_rdev_open()
   879  rdev->status_page->cq_start = rdev->lldi.vr->cq.start;  in c4iw_rdev_open()
   880  rdev->status_page->cq_size = rdev->lldi.vr->cq.size;  in c4iw_rdev_open()
   899  rdev->status_page->db_off = 0;  in c4iw_rdev_open()
   910  free_page((unsigned long)rdev->status_page);  in c4iw_rdev_open()
   926  free_page((unsigned long)rdev->status_page);  in c4iw_rdev_close()
  1280  ctx->dev->rdev.status_page->db_off = 1;  in stop_queues()
  [all …]

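Read together with provider.c and qp.c below, these hits describe a small driver-to-userspace contract: c4iw_rdev_open() fills one page with the usable QP/CQ id ranges plus a db_off flag, provider.c publishes its physical address so libcxgb4 can mmap() it into every user context, and the post paths check db_off so the driver can throttle user doorbells by flipping a single field. A compressed sketch of that lifecycle; demo_rdev, the reduced status-page layout and the helper names are illustrative, not the driver's real definitions:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Reduced, illustrative layout; the real struct t4_dev_status_page
 * lives in the cxgb4 headers and carries more fields. */
struct demo_status_page {
	u8  db_off;			/* 1 => userspace must stop ringing doorbells */
	u64 qp_start, qp_size;		/* QP id range usable from this function */
	u64 cq_start, cq_size;		/* CQ id range */
};

struct demo_rdev {
	struct demo_status_page *status_page;
};

static int demo_rdev_open(struct demo_rdev *rdev)
{
	/* One zeroed page; provider.c later hands its physical address
	 * (virt_to_phys(), line 113) to userspace for mmap(). */
	rdev->status_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rdev->status_page)
		return -ENOMEM;

	rdev->status_page->db_off = 0;	/* doorbells allowed */
	return 0;
}

static void demo_stop_queues(struct demo_rdev *rdev)
{
	/* qp.c checks this flag before every post (lines 1251, 1331),
	 * so a single store quiesces user doorbells. */
	rdev->status_page->db_off = 1;
}

static void demo_rdev_close(struct demo_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
}
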
provider.c
   113  mm->addr = virt_to_phys(rhp->rdev.status_page);  in c4iw_alloc_ucontext()

iw_cxgb4.h
   187  struct t4_dev_status_page *status_page;  [member]

qp.c
  1251  if (!rhp->rdev.status_page->db_off) {  in c4iw_post_send()
  1331  if (!qhp->rhp->rdev.status_page->db_off) {  in c4iw_post_receive()

/linux/drivers/infiniband/hw/qib/
qib_init.c
   310  u64 *status_page;  in init_pioavailregs()  [local]
   326  status_page = (u64 *)  in init_pioavailregs()
   331  dd->devstatusp = status_page;  in init_pioavailregs()
   332  *status_page++ = 0;  in init_pioavailregs()
   334  dd->pport[pidx].statusp = status_page;  in init_pioavailregs()
   335  *status_page++ = 0;  in init_pioavailregs()
   342  dd->freezemsg = (char *) status_page;  in init_pioavailregs()
   345  ret = (char *) status_page - (char *) dd->pioavailregs_dma;  in init_pioavailregs()

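Here status_page is not a page of its own: init_pioavailregs() carves a device status word, one status word per port, and the freeze-message text out of the tail of the already DMA-mapped pioavailregs buffer, then returns the freeze message's byte offset within that buffer. A simplified sketch of the carve-out, with a hypothetical demo_devdata standing in for qib_devdata and the starting position (elided at line 326 above) passed in by the caller:

#include <linux/types.h>

struct demo_pport {
	u64 *statusp;
};

struct demo_devdata {
	unsigned int num_pports;
	struct demo_pport *pport;
	u64 *devstatusp;
	char *freezemsg;
	void *pioavailregs_dma;		/* base of the DMA-coherent buffer */
};

/* Carve status words and the freeze message out of the tail of the
 * buffer; returns the freeze message's byte offset from the base. */
static long demo_carve_status_words(struct demo_devdata *dd, u64 *status_page)
{
	unsigned int pidx;

	dd->devstatusp = status_page;			/* device-wide status word */
	*status_page++ = 0;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		dd->pport[pidx].statusp = status_page;	/* per-port status word */
		*status_page++ = 0;
	}

	dd->freezemsg = (char *)status_page;		/* freeze text goes here */
	return (char *)status_page - (char *)dd->pioavailregs_dma;
}
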
/linux/drivers/gpu/drm/i915/selftests/
i915_perf.c
   354  store = memset32(rq->engine->status_page.addr + 512, 0, 32);  in live_noa_gpr()
   372  *cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +  in live_noa_gpr()

i915_request.c
  1816  return memset32(ce->engine->status_page.addr + 1000, 0, 21);  in hwsp_scratch()
  1821  return (i915_ggtt_offset(ce->engine->status_page.vma) +  in hwsp_offset()
  2051  i915_ggtt_offset(engine->status_page.vma) +  in plug()

/linux/security/selinux/include/
security.h
   101  struct page *status_page;  [member]

/linux/drivers/gpu/drm/i915/gt/uc/
intel_guc_submission.c
  3216  list_for_each_entry(tl, &engine->status_page.timelines, engine_link)  in sanitize_hwsp()
  3232  memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);  in guc_sanitize()
  3242  clflush_cache_range(engine->status_page.addr, PAGE_SIZE);  in guc_sanitize()
  3253  i915_ggtt_offset(engine->status_page.vma));  in setup_hwsp()

/linux/drivers/gpu/drm/i915/
i915_gpu_error.c
  1445  engine->status_page.vma,  in intel_engine_coredump_add_vma()