/kernel/linux/linux-6.6/drivers/gpu/drm/i915/
i915_gem_ww.c
     5:  #include <linux/dma-resv.h>
    51:  ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);  [in i915_gem_ww_ctx_backoff()]
    53:  dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);  [in i915_gem_ww_ctx_backoff()]
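The two _slow calls above are the backoff half of the ww-mutex dance that i915_gem_ww_ctx_backoff() performs. A minimal hedged sketch of that pattern for two reservation objects (lock_pair is a hypothetical name, not the i915 helper):

#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>

/* Hedged sketch (generic names, not the i915 helpers) of the ww-mutex
 * backoff pattern: on -EDEADLK, drop everything held, re-take only the
 * contended lock with the _slow variant, then retry the rest under the
 * same acquire context. */
static int lock_pair(struct dma_resv *a, struct dma_resv *b,
		     struct ww_acquire_ctx *ctx)
{
	struct dma_resv *contended = NULL;
	int ret;

	ww_acquire_init(ctx, &reservation_ww_class);
retry:
	if (contended) {
		ret = dma_resv_lock_slow_interruptible(contended, ctx);
		if (ret)
			goto err_fini;
	}

	if (a != contended) {
		ret = dma_resv_lock_interruptible(a, ctx);
		if (ret) {
			if (contended)
				dma_resv_unlock(contended);
			contended = a;
			goto check;
		}
	}

	if (b != contended) {
		ret = dma_resv_lock_interruptible(b, ctx);
		if (ret) {
			dma_resv_unlock(a);
			if (contended && contended != a)
				dma_resv_unlock(contended);
			contended = b;
			goto check;
		}
	}

	ww_acquire_done(ctx);
	return 0;

check:
	if (ret == -EDEADLK)
		goto retry;	/* re-take 'contended' with the slow lock */
err_fini:
	ww_acquire_fini(ctx);
	return ret;
}

Only the lock that reported -EDEADLK is re-taken with the _slow variant; every other lock is retried with the normal interruptible call under the same acquire context.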
i915_deps.c
   214:  * @resv: The reservation object, the fences of which to add.
   218:  * Calls i915_deps_add_dependency() on the indicated fences of @resv.
   222:  int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, …  [argument]
   228:  dma_resv_assert_held(resv);  [in i915_deps_add_resv()]
   229:  dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {  [in i915_deps_add_resv()]
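dma_resv_assert_held() plus dma_resv_for_each_fence() on lines 228-229 is the locked-iteration idiom for visiting every fence a reservation object carries. A hedged sketch of the same idiom that simply waits on each fence instead of recording it as i915_deps does (wait_resv_fences is a hypothetical name):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Walk all fences that a read/write access to the object would have to
 * respect, and wait for each one.  Sketch only; the caller must hold
 * the reservation lock, as in the code above. */
static int wait_resv_fences(struct dma_resv *resv, bool write)
{
	struct dma_resv_iter iter;
	struct dma_fence *fence;
	long ret = 0;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(write), fence) {
		ret = dma_fence_wait(fence, true);	/* interruptible */
		if (ret < 0)
			break;
	}
	return ret < 0 ? ret : 0;
}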
/kernel/linux/linux-5.10/mm/ |
hugetlb.c
   250:  get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)  [argument]
   254:  VM_BUG_ON(resv->region_cache_count <= 0);  [in get_file_region_entry_from_cache()]
   256:  resv->region_cache_count--;  [in get_file_region_entry_from_cache()]
   257:  nrg = list_first_entry(&resv->region_cache, struct file_region, link);  [in get_file_region_entry_from_cache()]
   280:  struct resv_map *resv,  [in record_hugetlb_cgroup_uncharge_info()]
   299:  if (!resv->pages_per_hpage)  [in record_hugetlb_cgroup_uncharge_info()]
   300:  resv->pages_per_hpage = pages_per_huge_page(h);  [in record_hugetlb_cgroup_uncharge_info()]
   304:  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));  [in record_hugetlb_cgroup_uncharge_info()]
   333:  static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)  [argument]
   338:  if (&prg->link != &resv …  [in coalesce_file_region()]
   278:  record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h, struct resv_map *resv, struct file_region *nrg)  [argument]
   368:  add_reservation_in_range(struct resv_map *resv, long f, long t, struct hugetlb_cgroup *h_cg, struct hstate *h, long *regions_needed)  [argument]
   514:  region_add(struct resv_map *resv, long f, long t, long in_regions_needed, struct hstate *h, struct hugetlb_cgroup *h_cg)  [argument]
   582:  region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed)  [argument]
   618:  region_abort(struct resv_map *resv, long f, long t, long regions_needed)  [argument]
   641:  region_del(struct resv_map *resv, long f, long t)  [argument]
   768:  region_count(struct resv_map *resv, long f, long t)  [argument]
  2200:  struct resv_map *resv;  [local, in __vma_reservation_common()]
  3671:  struct resv_map *resv = vma_resv_map(vma);  [local, in hugetlb_vm_op_open()]
  3690:  struct resv_map *resv = vma_resv_map(vma);  [local, in hugetlb_vm_op_close()]
  [all …]
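The region_chg()/region_add()/region_abort() definitions listed here form a two-phase protocol on the resv_map: chg estimates how many pages of a file range still need a reservation (and tops up the cached file_region entries), the caller charges for them, and add commits while abort backs out if the charge fails. A toy userspace model of that flow, with hypothetical names and a single range instead of the real region list:

#include <stdio.h>

/* Toy model (hypothetical names, not mm/hugetlb.c): estimate, charge,
 * then commit or abort. */
struct toy_resv_map {
	long from, to;			/* single already-reserved range */
	long adds_in_progress;		/* chg issued but not yet add/abort */
};

/* Phase 1: how many pages of [f, t) still need a reservation? */
static long toy_region_chg(struct toy_resv_map *m, long f, long t)
{
	long lo = f > m->from ? f : m->from;
	long hi = t < m->to ? t : m->to;
	long covered = hi > lo ? hi - lo : 0;

	m->adds_in_progress++;	/* the real chg phase also refills its entry cache here */
	return (t - f) - covered;
}

/* Phase 2a: commit the reservation. */
static void toy_region_add(struct toy_resv_map *m, long f, long t)
{
	if (f < m->from)
		m->from = f;
	if (t > m->to)
		m->to = t;
	m->adds_in_progress--;
}

/* Phase 2b: give up (e.g. the cgroup charge failed). */
static void toy_region_abort(struct toy_resv_map *m)
{
	m->adds_in_progress--;
}

int main(void)
{
	struct toy_resv_map m = { .from = 0, .to = 4, .adds_in_progress = 0 };
	long need = toy_region_chg(&m, 2, 8);	/* pages 4..7 are new -> 4 */

	printf("pages to charge: %ld\n", need);
	if (need >= 0)		/* pretend the charge succeeded */
		toy_region_add(&m, 2, 8);
	else
		toy_region_abort(&m);
	return 0;
}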
hugetlb_cgroup.c
   380:  void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,  [argument]
   383:  if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||  [in hugetlb_cgroup_uncharge_counter()]
   384:  !resv->css)  [in hugetlb_cgroup_uncharge_counter()]
   387:  page_counter_uncharge(resv->reservation_counter,  [in hugetlb_cgroup_uncharge_counter()]
   388:  (end - start) * resv->pages_per_hpage);  [in hugetlb_cgroup_uncharge_counter()]
   389:  css_put(resv->css);  [in hugetlb_cgroup_uncharge_counter()]
   392:  void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,  [argument]
   397:  if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)  [in hugetlb_cgroup_uncharge_file_region()]
   400:  if (rg->reservation_counter && resv …  [in hugetlb_cgroup_uncharge_file_region()]
  [all …]
/kernel/linux/linux-6.6/mm/ |
hugetlb.c
   437:  get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)  [argument]
   441:  VM_BUG_ON(resv->region_cache_count <= 0);  [in get_file_region_entry_from_cache()]
   443:  resv->region_cache_count--;  [in get_file_region_entry_from_cache()]
   444:  nrg = list_first_entry(&resv->region_cache, struct file_region, link);  [in get_file_region_entry_from_cache()]
   467:  struct resv_map *resv,  [in record_hugetlb_cgroup_uncharge_info()]
   486:  if (!resv->pages_per_hpage)  [in record_hugetlb_cgroup_uncharge_info()]
   487:  resv->pages_per_hpage = pages_per_huge_page(h);  [in record_hugetlb_cgroup_uncharge_info()]
   491:  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));  [in record_hugetlb_cgroup_uncharge_info()]
   519:  static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)  [argument]
   524:  if (&prg->link != &resv …  [in coalesce_file_region()]
   465:  record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h, struct resv_map *resv, struct file_region *nrg)  [argument]
   572:  add_reservation_in_range(struct resv_map *resv, long f, long t, struct hugetlb_cgroup *h_cg, struct hstate *h, long *regions_needed)  [argument]
   705:  region_add(struct resv_map *resv, long f, long t, long in_regions_needed, struct hstate *h, struct hugetlb_cgroup *h_cg)  [argument]
   772:  region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed)  [argument]
   808:  region_abort(struct resv_map *resv, long f, long t, long regions_needed)  [argument]
   831:  region_del(struct resv_map *resv, long f, long t)  [argument]
   958:  region_count(struct resv_map *resv, long f, long t)  [argument]
  2718:  struct resv_map *resv;  [local, in __vma_reservation_common()]
  4840:  struct resv_map *resv = vma_resv_map(vma);  [local, in hugetlb_vm_op_open()]
  4879:  struct resv_map *resv;  [local, in hugetlb_vm_op_close()]
  [all …]
hugetlb_cgroup.c
   423:  void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,  [argument]
   426:  if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||  [in hugetlb_cgroup_uncharge_counter()]
   427:  !resv->css)  [in hugetlb_cgroup_uncharge_counter()]
   430:  page_counter_uncharge(resv->reservation_counter,  [in hugetlb_cgroup_uncharge_counter()]
   431:  (end - start) * resv->pages_per_hpage);  [in hugetlb_cgroup_uncharge_counter()]
   432:  css_put(resv->css);  [in hugetlb_cgroup_uncharge_counter()]
   435:  void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,  [argument]
   440:  if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)  [in hugetlb_cgroup_uncharge_file_region()]
   443:  if (rg->reservation_counter && resv …  [in hugetlb_cgroup_uncharge_file_region()]
  [all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
amdgpu_vm.c
   331:  if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)  [in amdgpu_vm_bo_base_init()]
   621:  if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)  [in amdgpu_vm_del_from_lru_notify()]
   883:  bp->resv = vm->root.base.bo->tbo.base.resv;  [in amdgpu_vm_bo_param()]
  1583:  * @resv: fences we need to sync to
  1598:  bool unlocked, struct dma_resv *resv,  [in amdgpu_vm_bo_update_mapping()]
  1637:  r = vm->update_funcs->prepare(&params, resv, sync_mode);  [in amdgpu_vm_bo_update_mapping()]
  1656:  * @resv …
  1596:  amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate, bool unlocked, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t addr, dma_addr_t *pages_addr, struct dma_fence **fence)  [argument]
  1671:  amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_resv *resv, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, uint64_t flags, struct amdgpu_device *bo_adev, struct drm_mm_node *nodes, struct dma_fence **fence)  [argument]
  1788:  struct dma_resv *resv;  [local, in amdgpu_vm_bo_update()]
  1992:  struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;  [local, in amdgpu_vm_prt_fini()]
  2039:  struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;  [local, in amdgpu_vm_clear_freed()]
  2092:  struct dma_resv *resv;  [local, in amdgpu_vm_handle_moved()]
  [all …]
amdgpu_sync.c
   194:  * @resv: reservation object with embedded fence
   201:  struct dma_resv *resv, enum amdgpu_sync_mode mode,  [in amdgpu_sync_resv()]
   209:  if (resv == NULL)  [in amdgpu_sync_resv()]
   213:  f = dma_resv_get_excl(resv);  [in amdgpu_sync_resv()]
   216:  flist = dma_resv_get_list(resv);  [in amdgpu_sync_resv()]
   224:  dma_resv_held(resv));  [in amdgpu_sync_resv()]
   200:  amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner)  [argument]
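Both this helper and radeon_sync_resv() below use the 5.10-era dma_resv layout: one exclusive fence plus an RCU-managed array of shared fences. A hedged sketch of walking that layout while the object lock is held (walk_fences_v510 is a hypothetical name; later kernels dropped dma_resv_get_excl()/dma_resv_get_list() in favour of the dma_resv_iter seen elsewhere in this listing):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hedged sketch, 5.10 API: the exclusive fence plus the shared-fence
 * list, dereferenced under the held reservation lock. */
static void walk_fences_v510(struct dma_resv *resv)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	unsigned int i;

	dma_resv_assert_held(resv);

	f = dma_resv_get_excl(resv);		/* may be NULL */
	if (f)
		pr_debug("exclusive fence seqno %llu\n", f->seqno);

	flist = dma_resv_get_list(resv);	/* shared fences, may be NULL */
	for (i = 0; flist && i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));
		pr_debug("shared fence seqno %llu\n", f->seqno);
	}
}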
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/ |
radeon_sync.c
    83:  * @resv: reservation object with embedded fence
    90:  struct dma_resv *resv,  [in radeon_sync_resv()]
   100:  f = dma_resv_get_excl(resv);  [in radeon_sync_resv()]
   107:  flist = dma_resv_get_list(resv);  [in radeon_sync_resv()]
   113:  dma_resv_held(resv));  [in radeon_sync_resv()]
    88:  radeon_sync_resv(struct radeon_device *rdev, struct radeon_sync *sync, struct dma_resv *resv, bool shared)  [argument]
/third_party/rust/crates/rustix/src/ |
io_uring.rs
   770:  pub resv: u8,
   794:  pub resv: [u32; 3],
   835:  pub resv: u16,
   845:  pub resv: u8,
   855:  pub resv: u32,
   864:  pub resv: u32,
   875:  pub resv: u32,
   884:  pub resv: u32,
  1096:  check_struct_field!(io_uring_restriction, resv);  [in io_uring_layouts()]
  1109:  resv,  [in io_uring_layouts()]
  [all …]
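These are the reserved padding fields of the io_uring ABI structs that rustix mirrors; resv: [u32; 3], for instance, matches io_uring_params.resv. The kernel expects such fields to be zero, so the usual C-side idiom is to clear the whole struct before the setup syscall. A minimal hedged example (setup_ring is a hypothetical wrapper):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Zero the params struct (including its resv[] padding) before
 * io_uring_setup(); nonzero reserved fields are rejected.
 * Returns the ring fd, or -1 with errno set. */
static int setup_ring(unsigned int entries)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	return (int)syscall(__NR_io_uring_setup, entries, &p);
}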
/kernel/linux/linux-6.6/drivers/gpu/drm/loongson/ |
lsdc_gem.c
   154:  struct dma_resv *resv)  [in lsdc_gem_object_create()]
   161:  lbo = lsdc_bo_create(ddev, domain, size, kerenl, sg, resv);  [in lsdc_gem_object_create()]
   188:  struct dma_resv *resv = attach->dmabuf->resv;  [local, in lsdc_prime_import_sg_table()]
   193:  dma_resv_lock(resv, NULL);  [in lsdc_prime_import_sg_table()]
   195:  sg, resv);  [in lsdc_prime_import_sg_table()]
   196:  dma_resv_unlock(resv);  [in lsdc_prime_import_sg_table()]
   149:  lsdc_gem_object_create(struct drm_device *ddev, u32 domain, size_t size, bool kerenl, struct sg_table *sg, struct dma_resv *resv)  [argument]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/ |
i915_gem_clflush.c
   113:  dma_resv_reserve_fences(obj->base.resv, 1) == 0)  [in i915_gem_clflush_object()]
   117:  obj->base.resv, true,  [in i915_gem_clflush_object()]
   120:  dma_resv_add_fence(obj->base.resv, &clflush->base.dma,  [in i915_gem_clflush_object()]
i915_gem_dmabuf.c
     9:  #include <linux/dma-resv.h>
   226:  exp_info.resv = obj->base.resv;  [in i915_gem_prime_export()]
   324:  obj->base.resv = dma_buf->resv;  [in i915_gem_prime_import()]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/ |
intel_gt_buffer_pool.c
   105:  struct dma_resv *resv = node->obj->base.resv;  [local, in pool_active()]
   108:  if (dma_resv_trylock(resv)) {  [in pool_active()]
   109:  dma_resv_add_excl_fence(resv, NULL);  [in pool_active()]
   110:  dma_resv_unlock(resv);  [in pool_active()]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/ |
i915_gem_dmabuf.c
     9:  #include <linux/dma-resv.h>
   179:  exp_info.resv = obj->base.resv;  [in i915_gem_prime_export()]
   257:  obj->base.resv = dma_buf->resv;  [in i915_gem_prime_import()]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/ |
amdgpu_vm_sdma.c
    80:  * @resv: reservation object with embedded fence
    87:  struct dma_resv *resv,  [in amdgpu_vm_sdma_prepare()]
    97:  if (!resv)  [in amdgpu_vm_sdma_prepare()]
   101:  r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);  [in amdgpu_vm_sdma_prepare()]
   138:  dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,  [in amdgpu_vm_sdma_commit()]
   239:  dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);  [in amdgpu_vm_sdma_update()]
    86:  amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, struct dma_resv *resv, enum amdgpu_sync_mode sync_mode)  [argument]
/kernel/linux/linux-5.10/include/linux/ |
hugetlb_cgroup.h
   147:  extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
   151:  extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
   161:  static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,  [argument]
   259:  static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,  [argument]
/kernel/linux/linux-6.6/include/linux/ |
hugetlb_cgroup.h
   157:  extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
   161:  extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
   171:  static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,  [argument]
   266:  static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,  [argument]
/kernel/linux/linux-6.6/drivers/nvme/host/ |
pr.c
   246:  struct pr_held_reservation *resv)  [in nvme_pr_read_reservation()]
   264:  resv->generation = le32_to_cpu(tmp_rse.gen);  [in nvme_pr_read_reservation()]
   282:  resv->generation = le32_to_cpu(rse->gen);  [in nvme_pr_read_reservation()]
   283:  resv->type = block_pr_type_from_nvme(rse->rtype);  [in nvme_pr_read_reservation()]
   288:  resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);  [in nvme_pr_read_reservation()]
   296:  resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);  [in nvme_pr_read_reservation()]
   245:  nvme_pr_read_reservation(struct block_device *bdev, struct pr_held_reservation *resv)  [argument]
/kernel/linux/linux-6.6/drivers/gpu/drm/radeon/ |
radeon_object.c
   133:  struct dma_resv *resv,  [in radeon_bo_create()]
   206:  &bo->placement, page_align, !kernel, sg, resv,  [in radeon_bo_create()]
   224:  r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,  [in radeon_bo_kmap()]
   550:  dma_resv_assert_held(bo->tbo.base.resv);  [in radeon_bo_get_surface_reg()]
   675:  dma_resv_assert_held(bo->tbo.base.resv);  [in radeon_bo_get_tiling_flags()]
   687:  dma_resv_assert_held(bo->tbo.base.resv);  [in radeon_bo_check_tiling()]
   789:  struct dma_resv *resv = bo->tbo.base.resv;  [local, in radeon_bo_fence()]
   792:  r = dma_resv_reserve_fences(resv, 1);  [in radeon_bo_fence()]
   799:  dma_resv_add_fence(resv, …  [in radeon_bo_fence()]
   130:  radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, struct dma_resv *resv, struct radeon_bo **bo_ptr)  [argument]
  [all …]
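radeon_bo_fence() shows the current publish sequence: a fence slot has to be reserved first (and that reservation can fail), and only then is the fence added with an explicit usage. A hedged sketch of the same pair of calls, assuming the caller already holds the object lock and wants write semantics (publish_write_fence is a hypothetical name):

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Reserve a slot, then publish the fence; the object lock is held
 * throughout, as the assert documents. */
static int publish_write_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	int r;

	dma_resv_assert_held(resv);

	r = dma_resv_reserve_fences(resv, 1);
	if (r)
		return r;	/* e.g. -ENOMEM: caller may fall back to waiting */

	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
	return 0;
}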
/kernel/linux/linux-6.6/include/linux/sunrpc/ |
svc.h
   490:  struct kvec *resv = buf->head;  [local, in svcxdr_init_encode()]
   495:  xdr->iov = resv;  [in svcxdr_init_encode()]
   496:  xdr->p = resv->iov_base + resv->iov_len;  [in svcxdr_init_encode()]
   497:  xdr->end = resv->iov_base + PAGE_SIZE;  [in svcxdr_init_encode()]
   498:  buf->len = resv->iov_len;  [in svcxdr_init_encode()]
   537:  struct kvec *resv = buf->head;  [local, in svcxdr_set_auth_slack()]
   544:  WARN_ON(xdr->iov != resv);  [in svcxdr_set_auth_slack()]
/third_party/ltp/testcases/network/stress/ns-tools/ |
ns-mcast.h
    99:  uint8_t resv:4;  [member]
   101:  uint8_t resv:4;  [member]
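The two 4-bit resv members are presumably the endian-conditional declarations of a reserved nibble in a multicast query header, written once per bit order so the on-wire layout is identical on big- and little-endian hosts. A hedged, generic illustration of that declaration style (query_flags is a hypothetical struct, not the LTP header itself):

#include <stdint.h>
#include <endian.h>

/* Hypothetical packed flags byte with a reserved high nibble, declared
 * in both bit orders; reserved bits are sent as zero and ignored on
 * receive. */
struct query_flags {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint8_t resv:4;		/* reserved: must be zero */
	uint8_t sflag:1;	/* suppress router-side processing */
	uint8_t qrv:3;		/* querier's robustness variable */
#else
	uint8_t qrv:3;
	uint8_t sflag:1;
	uint8_t resv:4;		/* reserved: must be zero */
#endif
};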
/kernel/linux/linux-5.10/kernel/irq/ |
affinity.c
   500:  unsigned int resv = affd->pre_vectors + affd->post_vectors;  [local, in irq_calc_affinity_vectors()]
   503:  if (resv > minvec)  [in irq_calc_affinity_vectors()]
   507:  set_vecs = maxvec - resv;  [in irq_calc_affinity_vectors()]
   514:  return resv + min(set_vecs, maxvec - resv);  [in irq_calc_affinity_vectors()]
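The arithmetic here: the pre/post vectors are reserved off the top, the request collapses to 0 if even those exceed minvec, and the spreadable set is capped at maxvec minus the reserved count. A standalone userspace restatement with made-up numbers (calc_vectors is a hypothetical helper, not the kernel function):

#include <stdio.h>

/* Reserved (pre + post) vectors come off the top; the spreadable set is
 * clamped to whatever remains below maxvec. */
static unsigned int calc_vectors(unsigned int minvec, unsigned int maxvec,
				 unsigned int pre, unsigned int post,
				 unsigned int set_vecs)
{
	unsigned int resv = pre + post;

	if (resv > minvec)
		return 0;		/* not even the reserved vectors fit */

	if (set_vecs > maxvec - resv)
		set_vecs = maxvec - resv;

	return resv + set_vecs;
}

int main(void)
{
	/* 1 pre + 1 post vector reserved, at most 32 vectors available,
	 * 64 CPUs that would each like a queue -> 2 + 30 = 32 */
	printf("%u\n", calc_vectors(2, 32, 1, 1, 64));
	return 0;
}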
/kernel/linux/linux-6.6/fs/xfs/scrub/ |
reap.c
    88:  enum xfs_ag_resv_type resv;  [member]
   316:  if (rs->resv == XFS_AG_RESV_AGFL)  [in xreap_agextent_select()]
   399:  if (rs->resv == XFS_AG_RESV_AGFL) {  [in xreap_agextent_iter()]
   414:  rs->resv, true);  [in xreap_agextent_iter()]
   483:  .resv = type,  [in xrep_reap_agblocks()]
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/ |
msm_gem.h
    11:  #include <linux/dma-resv.h>
   183:  dma_resv_lock(obj->resv, NULL);  [in msm_gem_lock()]
   189:  return dma_resv_lock_interruptible(obj->resv, NULL);  [in msm_gem_lock_interruptible()]
   195:  dma_resv_unlock(obj->resv);  [in msm_gem_unlock()]
   215:  (lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)  [in msm_gem_assert_locked()]