/linux/fs/ocfs2/

reservations.c
    57   return resv->r_start + resv->r_len - 1;  in ocfs2_resv_end()
    89   ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,  in ocfs2_dump_resv()
    102  ocfs2_resv_end(resv), resv->r_len, resv->r_last_start,  in ocfs2_dump_resv()
    154  if (resv->r_start > ocfs2_resv_end(resv)) {  in ocfs2_check_resmap()
    189  memset(resv, 0, sizeof(*resv));  in ocfs2_resv_init_once()
    251  resv->r_last_len = resv->r_last_start = 0;  in __ocfs2_resv_discard()
    672  resv->r_len, resv->r_last_start,  in ocfs2_cannibalize_resv()
    692  goal = resv->r_last_start + resv->r_last_len;  in ocfs2_resv_find_window()
    780  resv->r_len = old_end - resv->r_start + 1;  in ocfs2_adjust_resv_from_alloc()
    800  ocfs2_resv_end(resv), resv->r_len,  in ocfs2_resmap_claimed_bits()
    [all …]
reservations.h
    54   void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv);
    57   void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
    71   struct ocfs2_alloc_reservation *resv);
    127  struct ocfs2_alloc_reservation *resv,
    146  struct ocfs2_alloc_reservation *resv,
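The reservations.c hits sketch the window arithmetic: a reservation covers r_len bits starting at r_start, so its inclusive end is r_start + r_len - 1 (line 57), and a window whose start lies past its end, as checked at line 154, is malformed. A minimal sketch of that invariant; the type and helper names below are illustrative stand-ins, not the real ocfs2 ones:

    struct resv_window {                    /* stand-in for ocfs2_alloc_reservation */
            unsigned int r_start;           /* first covered bit */
            unsigned int r_len;             /* number of covered bits */
    };

    static unsigned int resv_end(const struct resv_window *w)
    {
            return w->r_start + w->r_len - 1;       /* inclusive last bit */
    }

    static int resv_is_sane(const struct resv_window *w)
    {
            /* r_len == 0 makes resv_end() wrap below r_start */
            return w->r_len != 0 && w->r_start <= resv_end(w);
    }
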
/linux/drivers/gpu/drm/ttm/

ttm_bo.c
    262   bo->base.resv = &bo->base._resv;  in ttm_bo_individualize_resv()
    353   dma_resv_unlock(bo->base.resv);  in ttm_bo_cleanup_refs()
    466   dma_resv_unlock(bo->base.resv);  in ttm_bo_release()
    608   if (bo->base.resv == ctx->resv) {  in ttm_bo_evict_swapout_allowable()
    962   struct dma_resv *resv,  in ttm_bo_init_reserved() argument
    980   if (resv) {  in ttm_bo_init_reserved()
    981   bo->base.resv = resv;  in ttm_bo_init_reserved()
    1006  if (!resv) {  in ttm_bo_init_reserved()
    1015  if (!resv)  in ttm_bo_init_reserved()
    1036  struct dma_resv *resv,  in ttm_bo_init() argument
    [all …]
ttm_execbuf_util.c
    42   dma_resv_unlock(bo->base.resv);  in ttm_eu_backoff_reservation_reverse()
    58   dma_resv_unlock(bo->base.resv);  in ttm_eu_backoff_reservation()
    107  ret = dma_resv_reserve_shared(bo->base.resv,  in ttm_eu_reserve_buffers()
    124  ret = dma_resv_reserve_shared(bo->base.resv,  in ttm_eu_reserve_buffers()
    159  dma_resv_add_shared_fence(bo->base.resv, fence);  in ttm_eu_fence_buffer_objects()
    161  dma_resv_add_excl_fence(bo->base.resv, fence);  in ttm_eu_fence_buffer_objects()
    163  dma_resv_unlock(bo->base.resv);  in ttm_eu_fence_buffer_objects()
ttm_bo_vm.c
    74   dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault_idle()
    138  if (unlikely(!dma_resv_trylock(bo->base.resv))) {  in ttm_bo_vm_reserve()
    148  if (!dma_resv_lock_interruptible(bo->base.resv,  in ttm_bo_vm_reserve()
    150  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve()
    157  if (dma_resv_lock_interruptible(bo->base.resv, NULL))  in ttm_bo_vm_reserve()
    167  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve()
    350  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault()
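The ttm_bo_vm_reserve() hits show the usual fault-path locking dance: try the reservation lock without sleeping, and only when that fails either bounce the fault with VM_FAULT_RETRY or fall back to a sleeping, interruptible lock. A simplified sketch under those assumptions (reference counting and the full TTM error paths are omitted):

    #include <linux/mm.h>
    #include <linux/dma-resv.h>

    static vm_fault_t fault_reserve(struct dma_resv *resv, struct vm_fault *vmf)
    {
            if (dma_resv_trylock(resv))
                    return 0;                       /* fast path, no sleeping */

            if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                    if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                            return VM_FAULT_RETRY;  /* caller forbids sleeping */
                    /* drop mmap_lock, wait for the lock, then retry the fault */
                    mmap_read_unlock(vmf->vma->vm_mm);
                    if (!dma_resv_lock_interruptible(resv, NULL))
                            dma_resv_unlock(resv);
                    return VM_FAULT_RETRY;
            }

            if (dma_resv_lock_interruptible(resv, NULL))
                    return VM_FAULT_NOPAGE;         /* interrupted by a signal */
            return 0;
    }
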
/linux/fs/xfs/libxfs/

xfs_ag_resv.c
    134  struct xfs_ag_resv *resv;  in __xfs_ag_resv_free() local
    151  oldresv = resv->ar_reserved;  in __xfs_ag_resv_free()
    153  resv->ar_reserved = 0;  in __xfs_ag_resv_free()
    154  resv->ar_asked = 0;  in __xfs_ag_resv_free()
    155  resv->ar_orig_reserved = 0;  in __xfs_ag_resv_free()
    186  struct xfs_ag_resv *resv;  in __xfs_ag_resv_init() local
    239  resv->ar_asked = ask;  in __xfs_ag_resv_init()
    354  struct xfs_ag_resv *resv;  in xfs_ag_resv_alloc_extent() local
    378  resv->ar_reserved -= len;  in xfs_ag_resv_alloc_extent()
    398  struct xfs_ag_resv *resv;  in xfs_ag_resv_free_extent() local
    [all …]
/linux/drivers/dma-buf/

dma-buf.c
    80   dma_resv_fini(dmabuf->resv);  in dma_buf_release()
    233  struct dma_resv *resv;  in dma_buf_poll() local
    240  resv = dmabuf->resv;  in dma_buf_poll()
    248  dma_resv_lock(resv, NULL);  in dma_buf_poll()
    296  dma_resv_unlock(resv);  in dma_buf_poll()
    497  struct dma_resv *resv = exp_info->resv;  in dma_buf_export() local
    502  if (!exp_info->resv)  in dma_buf_export()
    542  if (!resv) {  in dma_buf_export()
    544  dma_resv_init(resv);  in dma_buf_export()
    546  dmabuf->resv = resv;  in dma_buf_export()
    [all …]
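The dma_buf_export() hits show the optional-reservation pattern: an exporter may pass an existing struct dma_resv in via exp_info->resv, so the new buffer shares a locking domain with its backing object, and only when none is supplied does the dma-buf initialize a reservation object of its own, which dma_buf_release() later tears down with dma_resv_fini(). A hedged sketch of the idea, with illustrative names rather than the real dma-buf internals:

    #include <linux/dma-resv.h>

    struct my_buf {
            struct dma_resv *resv;          /* what importers lock */
            struct dma_resv inline_resv;    /* fallback storage */
            bool owns_resv;                 /* did we init inline_resv? */
    };

    static void my_buf_attach_resv(struct my_buf *buf, struct dma_resv *resv)
    {
            buf->owns_resv = !resv;
            if (!resv) {
                    resv = &buf->inline_resv;
                    dma_resv_init(resv);    /* ours: fini on release */
            }
            buf->resv = resv;
    }

    static void my_buf_release(struct my_buf *buf)
    {
            if (buf->owns_resv)
                    dma_resv_fini(buf->resv);
    }
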
/linux/drivers/gpu/drm/i915/

dma_resv_utils.c
    10  void dma_resv_prune(struct dma_resv *resv)  in dma_resv_prune() argument
    12  if (dma_resv_trylock(resv)) {  in dma_resv_prune()
    13  if (dma_resv_test_signaled(resv, true))  in dma_resv_prune()
    14  dma_resv_add_excl_fence(resv, NULL);  in dma_resv_prune()
    15  dma_resv_unlock(resv);  in dma_resv_prune()
/linux/drivers/gpu/drm/vgem/

vgem_fence.c
    131  struct dma_resv *resv;  in vgem_fence_attach_ioctl() local
    153  resv = obj->resv;  in vgem_fence_attach_ioctl()
    154  if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {  in vgem_fence_attach_ioctl()
    161  dma_resv_lock(resv, NULL);  in vgem_fence_attach_ioctl()
    163  dma_resv_add_excl_fence(resv, fence);  in vgem_fence_attach_ioctl()
    164  else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)  in vgem_fence_attach_ioctl()
    165  dma_resv_add_shared_fence(resv, fence);  in vgem_fence_attach_ioctl()
    166  dma_resv_unlock(resv);  in vgem_fence_attach_ioctl()
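The vgem hits show the two-step shared-fence publication of this kernel vintage: a slot must be reserved first (which can fail with -ENOMEM) before the fence is added, and both steps happen under the reservation lock, while a writer simply replaces the single exclusive fence. A minimal sketch of that pattern (later kernels reworked this API into dma_resv_reserve_fences() with usage-typed fences):

    #include <linux/dma-resv.h>

    static int publish_fence(struct dma_resv *resv, struct dma_fence *fence,
                             bool write)
    {
            int ret = 0;

            dma_resv_lock(resv, NULL);
            if (write) {
                    /* a writer replaces the single exclusive fence */
                    dma_resv_add_excl_fence(resv, fence);
            } else {
                    /* readers need a slot reserved first; this can fail */
                    ret = dma_resv_reserve_shared(resv, 1);
                    if (!ret)
                            dma_resv_add_shared_fence(resv, fence);
            }
            dma_resv_unlock(resv);
            return ret;
    }
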
/linux/drivers/gpu/drm/amd/amdgpu/

amdgpu_dma_buf.c
    334  struct dma_resv *resv = dma_buf->resv;  in amdgpu_dma_buf_create_obj() local
    341  dma_resv_lock(resv, NULL);  in amdgpu_dma_buf_create_obj()
    351  ttm_bo_type_sg, resv, &gobj);  in amdgpu_dma_buf_create_obj()
    359  dma_resv_unlock(resv);  in amdgpu_dma_buf_create_obj()
    363  dma_resv_unlock(resv);  in amdgpu_dma_buf_create_obj()
    379  struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);  in amdgpu_dma_buf_move_notify()
    398  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_dma_buf_move_notify() local
    405  r = dma_resv_lock(resv, ticket);  in amdgpu_dma_buf_move_notify()
    414  if (!dma_resv_trylock(resv))  in amdgpu_dma_buf_move_notify()
    426  dma_resv_unlock(resv);  in amdgpu_dma_buf_move_notify()
amdgpu_vm.c
    375   if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)  in amdgpu_vm_bo_base_init()
    665   if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)  in amdgpu_vm_del_from_lru_notify()
    952   bp.resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_pt_create()
    967   resv = bp.resv;  in amdgpu_vm_pt_create()
    973   bp.resv = bo->tbo.base.resv;  in amdgpu_vm_pt_create()
    978   if (!resv)  in amdgpu_vm_pt_create()
    1894  resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_bo_update()
    1898  resv = bo->tbo.base.resv;  in amdgpu_vm_bo_update()
    2104  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_prt_fini() local
    2150  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_vm_clear_freed() local
    [all …]
amdgpu_object.c
    249   bp.resv = NULL;  in amdgpu_bo_create_reserved()
    533   .resv = bp->resv  in amdgpu_bo_create()
    591   bp->resv, bp->destroy);  in amdgpu_bo_create()
    616   if (!bp->resv)  in amdgpu_bo_create()
    629   if (!bp->resv)  in amdgpu_bo_create()
    630   dma_resv_unlock(bo->tbo.base.resv);  in amdgpu_bo_create()
    1282  if (bo->base.resv == &bo->base._resv)  in amdgpu_bo_release_notify()
    1289  dma_resv_lock(bo->base.resv, NULL);  in amdgpu_bo_release_notify()
    1297  dma_resv_unlock(bo->base.resv);  in amdgpu_bo_release_notify()
    1369  struct dma_resv *resv = bo->tbo.base.resv;  in amdgpu_bo_fence() local
    [all …]
/linux/include/uapi/linux/

io_uring.h
    273  __u32 resv[3];  member
    337  __u32 resv;  member
    343  __u32 resv;  member
    351  __u32 resv;  member
    357  __u32 resv;  member
    371  __u8 resv;  member
    379  __u16 resv;  member
    391  __u8 resv;  member
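These resv members are reserved UAPI padding: userspace must zero them, and the kernel rejects nonzero values so the bits can later be given meaning without breaking old binaries. A sketch of the usual validation, with an illustrative struct rather than a real io_uring one:

    #include <linux/kernel.h>       /* ARRAY_SIZE */
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_params {            /* illustrative, not a real UAPI struct */
            __u32 flags;
            __u32 resv[3];          /* must be zero today */
    };

    static int demo_validate(const struct demo_params *p)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(p->resv); i++)
                    if (p->resv[i])
                            return -EINVAL; /* unknown future feature bits */
            return 0;
    }
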
/linux/include/drm/ttm/

ttm_bo_driver.h
    143  success = dma_resv_trylock(bo->base.resv);  in ttm_bo_reserve()
    148  ret = dma_resv_lock_interruptible(bo->base.resv, ticket);  in ttm_bo_reserve()
    150  ret = dma_resv_lock(bo->base.resv, ticket);  in ttm_bo_reserve()
    171  int ret = dma_resv_lock_slow_interruptible(bo->base.resv,  in ttm_bo_reserve_slowpath()
    177  dma_resv_lock_slow(bo->base.resv, ticket);  in ttm_bo_reserve_slowpath()
    220  dma_resv_unlock(bo->base.resv);  in ttm_bo_unreserve()
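As these hits show, ttm_bo_reserve() is a thin dispatcher over bo->base.resv: trylock when no_wait is set, dma_resv_lock_interruptible() when interruptible, plain dma_resv_lock() otherwise, with ttm_bo_unreserve() dropping the lock. A hedged sketch of a typical caller (error handling trimmed; touch_bo() is an illustrative name):

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_bo_driver.h>

    static int touch_bo(struct ttm_buffer_object *bo)
    {
            int ret;

            /* interruptible sleep, no trylock, no ww_acquire_ctx ticket */
            ret = ttm_bo_reserve(bo, true, false, NULL);
            if (ret)
                    return ret;             /* e.g. -ERESTARTSYS */

            dma_resv_assert_held(bo->base.resv);    /* BO state may be touched */

            ttm_bo_unreserve(bo);
            return 0;
    }
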
ttm_bo_api.h
    214  struct dma_resv *resv;  member
    388  struct sg_table *sg, struct dma_resv *resv,
    432  struct sg_table *sg, struct dma_resv *resv,
    552  dma_resv_assert_held(bo->base.resv);  in ttm_bo_pin()
    565  dma_resv_assert_held(bo->base.resv);  in ttm_bo_unpin()
/linux/drivers/infiniband/core/

umem_dmabuf.c
    24   dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_map_pages()
    71   fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_map_pages()
    81   dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_unmap_pages()
    196  dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);  in ib_umem_dmabuf_get_pinned()
    205  dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_get_pinned()
    212  dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_get_pinned()
    222  dma_resv_lock(dmabuf->resv, NULL);  in ib_umem_dmabuf_release()
    226  dma_resv_unlock(dmabuf->resv);  in ib_umem_dmabuf_release()
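The common thread in these hits is the dma-buf locking contract: an attachment may only be mapped or unmapped while the dma-buf's reservation lock is held (hence the dma_resv_assert_held() calls), so the pinned-get and release paths bracket the map/unmap helpers with dma_resv_lock()/dma_resv_unlock(). A minimal sketch, where map_pages() is a hypothetical stand-in for a driver helper such as ib_umem_dmabuf_map_pages():

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>

    static int map_pages(struct dma_buf *dmabuf)
    {
            dma_resv_assert_held(dmabuf->resv);     /* contract: caller locks */
            /* ... the actual attachment mapping would happen here ... */
            return 0;
    }

    static int map_attachment(struct dma_buf *dmabuf)
    {
            int ret;

            dma_resv_lock(dmabuf->resv, NULL);
            ret = map_pages(dmabuf);
            dma_resv_unlock(dmabuf->resv);
            return ret;
    }
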
/linux/net/sunrpc/

svc.c
    1269  svc_putu32(resv, rqstp->rq_xid);  in svc_process_common()
    1274  svc_putnl(resv, 1);  /* REPLY */  in svc_process_common()
    1280  reply_statp = resv->iov_base + resv->iov_len;  in svc_process_common()
    1346  statp = resv->iov_base +resv->iov_len;  in svc_process_common()
    1347  svc_putnl(resv, RPC_SUCCESS);  in svc_process_common()
    1366  resv->iov_len = ((void*)statp) - resv->iov_base + 4;  in svc_process_common()
    1396  svc_putnl(resv, 1);  /* REJECT */  in svc_process_common()
    1399  svc_putnl(resv, 2);  in svc_process_common()
    1408  svc_putnl(resv, 1);  /* REJECT */  in svc_process_common()
    1469  resv->iov_len = 0;  in svc_process()
    [all …]
/linux/drivers/gpu/drm/i915/gem/

i915_gem_wait.c
    36   i915_gem_object_wait_reservation(struct dma_resv *resv,  in i915_gem_object_wait_reservation() argument
    48   ret = dma_resv_get_fences(resv, &excl, &count, &shared);  in i915_gem_object_wait_reservation()
    76   excl = dma_resv_get_excl_unlocked(resv);  in i915_gem_object_wait_reservation()
    89   dma_resv_prune(resv);  in i915_gem_object_wait_reservation()
    161  ret = dma_resv_get_fences(obj->base.resv, &excl, &count,  in i915_gem_object_wait_priority()
    173  excl = dma_resv_get_excl_unlocked(obj->base.resv);  in i915_gem_object_wait_priority()
    197  timeout = i915_gem_object_wait_reservation(obj->base.resv,  in i915_gem_object_wait()
i915_gem_busy.c
    146  seq = raw_read_seqcount(&obj->base.resv->seq);  in i915_gem_busy_ioctl()
    149  args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));  in i915_gem_busy_ioctl()
    152  list = dma_resv_shared_list(obj->base.resv);  in i915_gem_busy_ioctl()
    164  if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))  in i915_gem_busy_ioctl()
/linux/drivers/gpu/drm/radeon/

radeon_prime.c
    47  struct dma_resv *resv = attach->dmabuf->resv;  in radeon_gem_prime_import_sg_table() local
    52  dma_resv_lock(resv, NULL);  in radeon_gem_prime_import_sg_table()
    54  RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);  in radeon_gem_prime_import_sg_table()
    55  dma_resv_unlock(resv);  in radeon_gem_prime_import_sg_table()
radeon_benchmark.c
    38   struct dma_resv *resv)  in radeon_benchmark_do_move() argument
    51   resv);  in radeon_benchmark_do_move()
    56   resv);  in radeon_benchmark_do_move()
    125  dobj->tbo.base.resv);  in radeon_benchmark_move()
    136  dobj->tbo.base.resv);  in radeon_benchmark_move()
radeon_sync.c
    91   struct dma_resv *resv,  in radeon_sync_resv() argument
    101  f = dma_resv_excl_fence(resv);  in radeon_sync_resv()
    108  flist = dma_resv_shared_list(resv);  in radeon_sync_resv()
    114  dma_resv_held(resv));  in radeon_sync_resv()
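radeon_sync_resv() walks both fence classes of this era's dma_resv: the single exclusive fence, then the array of shared fences, each slot dereferenced under the reservation lock (the dma_resv_held() at line 114). A sketch of that walk; later kernels replaced it with the dma_resv_for_each_fence() iterators:

    #include <linux/dma-resv.h>

    static void walk_fences(struct dma_resv *resv)
    {
            struct dma_resv_list *flist;
            struct dma_fence *f;
            unsigned int i;

            f = dma_resv_excl_fence(resv);          /* may be NULL */
            if (f)
                    pr_info("excl fence, context %llu\n", f->context);

            flist = dma_resv_shared_list(resv);     /* may be NULL */
            for (i = 0; flist && i < flist->shared_count; ++i) {
                    f = rcu_dereference_protected(flist->shared[i],
                                                  dma_resv_held(resv));
                    pr_info("shared fence, context %llu\n", f->context);
            }
    }
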
/linux/net/sunrpc/auth_gss/

svcauth_gss.c
    699   svc_putnl(resv, o->len);  in svc_safe_putnetobj()
    700   p = resv->iov_base + resv->iov_len;  in svc_safe_putnetobj()
    1542  __be32 *reject_stat = resv->iov_base + resv->iov_len;  in svcauth_gss_accept()
    1625  svcdata->verf_start = resv->iov_base + resv->iov_len;  in svcauth_gss_accept()
    1636  svc_putnl(resv, 0);  in svcauth_gss_accept()
    1637  svc_putnl(resv, 0);  in svcauth_gss_accept()
    1645  svc_putnl(resv, 0);  in svcauth_gss_accept()
    1646  svc_putnl(resv, 0);  in svcauth_gss_accept()
    1719  struct kvec *resv;  in svcauth_gss_wrap_resp_integ() local
    1744  resv = &resbuf->tail[0];  in svcauth_gss_wrap_resp_integ()
    [all …]
/linux/mm/

hugetlb.c
    456   __must_hold(&resv->lock)  in allocate_file_region_entries()
    484   VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);  in allocate_file_region_entries()
    494   spin_lock(&resv->lock);  in allocate_file_region_entries()
    533   spin_lock(&resv->lock);  in region_add()
    599   spin_lock(&resv->lock);  in region_chg()
    633   spin_lock(&resv->lock);  in region_abort()
    661   spin_lock(&resv->lock);  in region_del()
    682   resv->region_cache_count > resv->adds_in_progress) {  in region_del()
    786   spin_lock(&resv->lock);  in region_count()
    2494  struct resv_map *resv;  in __vma_reservation_common() local
    [all …]
hugetlb_cgroup.c
    377  void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,  in hugetlb_cgroup_uncharge_counter() argument
    380  if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||  in hugetlb_cgroup_uncharge_counter()
    381  !resv->css)  in hugetlb_cgroup_uncharge_counter()
    384  page_counter_uncharge(resv->reservation_counter,  in hugetlb_cgroup_uncharge_counter()
    385  (end - start) * resv->pages_per_hpage);  in hugetlb_cgroup_uncharge_counter()
    386  css_put(resv->css);  in hugetlb_cgroup_uncharge_counter()
    389  void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,  in hugetlb_cgroup_uncharge_file_region() argument
    394  if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)  in hugetlb_cgroup_uncharge_file_region()
    397  if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&  in hugetlb_cgroup_uncharge_file_region()
    398  !resv->reservation_counter) {  in hugetlb_cgroup_uncharge_file_region()
    [all …]