/kernel/linux/linux-6.6/drivers/gpu/drm/radeon/

radeon_prime.c
     49  struct dma_resv *resv = attach->dmabuf->resv;   in radeon_gem_prime_import_sg_table() [local]
     54  dma_resv_lock(resv, NULL);                      in radeon_gem_prime_import_sg_table()
     56  RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);       in radeon_gem_prime_import_sg_table()
     57  dma_resv_unlock(resv);                          in radeon_gem_prime_import_sg_table()
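The radeon_prime.c hits above are the classic dma-buf import pattern: the importer borrows the exporter's reservation object, takes its lock, creates an object that shares that resv, and unlocks. A minimal sketch of that shape, assuming a hypothetical my_bo / my_bo_create() that accepts a shared resv; this is not the verbatim radeon code:

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static struct my_bo *import_with_shared_resv(struct dma_buf_attachment *attach,
                                                 struct sg_table *sg)
    {
            struct dma_resv *resv = attach->dmabuf->resv;
            struct my_bo *bo;
            int ret;

            dma_resv_lock(resv, NULL);           /* object creation wants the resv held */
            ret = my_bo_create(sg, resv, &bo);   /* new BO shares the exporter's resv */
            dma_resv_unlock(resv);

            return ret ? ERR_PTR(ret) : bo;
    }

Sharing the exporter's resv is what keeps fencing coherent across the export/import boundary: both drivers serialize on the same lock and see the same fence state.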
radeon_benchmark.c
     35  radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                                  uint64_t saddr, uint64_t daddr, int flag,
                                  int n, struct dma_resv *resv)   [argument]
     38  struct dma_resv *resv)          in radeon_benchmark_do_move()
     51  resv);                          in radeon_benchmark_do_move()
     56  resv);                          in radeon_benchmark_do_move()
    125  dobj->tbo.base.resv);           in radeon_benchmark_move()
    136  dobj->tbo.base.resv);           in radeon_benchmark_move()
/kernel/linux/linux-6.6/drivers/gpu/drm/ttm/

ttm_execbuf_util.c
     38  dma_resv_unlock(bo->base.resv);                in ttm_eu_backoff_reservation_reverse()
     54  dma_resv_unlock(bo->base.resv);                in ttm_eu_backoff_reservation()
    102  ret = dma_resv_reserve_fences(bo->base.resv,   in ttm_eu_reserve_buffers()
    119  ret = dma_resv_reserve_fences(bo->base.resv,   in ttm_eu_reserve_buffers()
    153  dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?   in ttm_eu_fence_buffer_objects()
    156  dma_resv_unlock(bo->base.resv);                in ttm_eu_fence_buffer_objects()
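Taken together, these hits trace the execbuf reservation lifecycle: lock every BO on the validation list (ww-ticket based), reserve fence slots, and after submission publish the job fence into each resv before unlocking. A caller-side sketch against the 5.10/6.6 helper signatures, error handling trimmed:

    #include <drm/ttm/ttm_execbuf_util.h>
    #include <linux/dma-fence.h>
    #include <linux/ww_mutex.h>

    static int submit_with_eu_helpers(struct list_head *validated,
                                      struct dma_fence *fence)
    {
            struct ww_acquire_ctx ticket;
            int ret;

            /* Locks each bo->base.resv on the list, deadlock-free via the
             * ticket, and reserves fence slots (the dma_resv_reserve_fences()
             * hits above live inside this helper). */
            ret = ttm_eu_reserve_buffers(&ticket, validated, true, NULL);
            if (ret)
                    return ret;

            /* ... build and submit the job that will signal @fence ... */

            /* Adds @fence to every resv, then unlocks the whole list. */
            ttm_eu_fence_buffer_objects(&ticket, validated, fence);
            return 0;
    }

On a mid-stream failure the counterpart is ttm_eu_backoff_reservation(), which is exactly the unlock loop visible at lines 38/54.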
ttm_bo_vm.c
     49  if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))   in ttm_bo_vm_fault_idle()
     63  (void)dma_resv_wait_timeout(bo->base.resv,     in ttm_bo_vm_fault_idle()
     66  dma_resv_unlock(bo->base.resv);                in ttm_bo_vm_fault_idle()
     74  err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,   in ttm_bo_vm_fault_idle()
    125  if (unlikely(!dma_resv_trylock(bo->base.resv))) {   in ttm_bo_vm_reserve()
    135  if (!dma_resv_lock_interruptible(bo->base.resv,     in ttm_bo_vm_reserve()
    137  dma_resv_unlock(bo->base.resv);                in ttm_bo_vm_reserve()
    144  if (dma_resv_lock_interruptible(bo->base.resv, NULL))   in ttm_bo_vm_reserve()
    154  dma_resv_unlock(bo->base.resv);                in ttm_bo_vm_reserve()
    344  dma_resv_unlock(bo->base.resv);                in ttm_bo_vm_fault()
    [all...]
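The ttm_bo_vm_reserve() hits show the fault-path locking rule: never block on the resv while holding mmap_lock if the fault can be retried instead. A simplified sketch of that branch structure; the real function also juggles BO refcounts and FAULT_FLAG_RETRY_NOWAIT:

    #include <linux/mm.h>
    #include <linux/dma-resv.h>
    #include <drm/ttm/ttm_bo.h>

    static vm_fault_t reserve_in_fault(struct ttm_buffer_object *bo,
                                       struct vm_fault *vmf)
    {
            if (!dma_resv_trylock(bo->base.resv)) {
                    if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                            /* Drop mmap_lock and let the core retry the fault. */
                            mmap_read_unlock(vmf->vma->vm_mm);
                            return VM_FAULT_RETRY;
                    }
                    if (dma_resv_lock_interruptible(bo->base.resv, NULL))
                            return VM_FAULT_NOPAGE;
            }
            return 0;
    }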
/kernel/linux/linux-5.10/include/uapi/linux/

io_uring.h
    269  __u32 resv[3];    member
    333  __u32 resv;       member
    339  __u32 resv;       member
    347  __u32 resv;       member
    353  __u32 resv;       member
    367  __u8  resv;       member
    375  __u16 resv;       member
    387  __u8  resv;       member
/kernel/linux/linux-6.6/include/uapi/linux/

io_uring.h
    481  __u32 resv[3];    member
    565  __u32 resv;       member
    585  __u32 resv;       member
    591  __u32 resv;       member
    605  __u8  resv;       member
    613  __u16 resv;       member
    625  __u8  resv;       member
    633  __u16 resv;       member
    640  * ring tail is overlaid with the io_uring_buf->resv field.
    672  __u64 resv[…      member
    721  __u64 resv;       member
    [all...]
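All of these resv members are reserved padding in the io_uring uapi, and several of the corresponding entry points reject nonzero reserved fields with -EINVAL (io_uring_setup, for one, checks the resv array of struct io_uring_params). The userspace convention is therefore to zero the whole struct. A minimal raw-syscall sketch, no liburing assumed:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    static int setup_ring(unsigned int entries)
    {
            struct io_uring_params p;

            memset(&p, 0, sizeof(p));   /* clears p.resv[] along with all flags */
            return (int)syscall(__NR_io_uring_setup, entries, &p);
    }

Keeping reserved fields zero is what lets the kernel later assign them meaning without breaking old binaries.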
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/

amdgpu_vm_cpu.c
     42  * @resv: reservation object with embedded fence
     48  amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
                              struct dma_resv *resv,
                              enum amdgpu_sync_mode sync_mode)   [argument]
     49  struct dma_resv *resv,      in amdgpu_vm_cpu_prepare()
     52  if (!resv)                  in amdgpu_vm_cpu_prepare()
     55  return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);   in amdgpu_vm_cpu_prepare()
     80  r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,   in amdgpu_vm_cpu_update()
amdgpu_object.c
    269  bp.resv = NULL;                       in amdgpu_bo_create_reserved()
    553  .resv = bp->resv                      in amdgpu_bo_create()
    623  bp->resv, bp->destroy);               in amdgpu_bo_create()
    639  r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);   in amdgpu_bo_create()
    643  dma_resv_add_fence(bo->tbo.base.resv, fence,   in amdgpu_bo_create()
    647  if (!bp->resv)                        in amdgpu_bo_create()
    660  if (!bp->resv)                        in amdgpu_bo_create()
    661  dma_resv_unlock(bo->tbo.base.resv);   in amdgpu_bo_create()
    791  r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,   in amdgpu_bo_kmap()
   1432  struct dma_resv *resv = bo->tbo.base.resv;   in amdgpu_bo_fence() [local]
   1460  amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
                                  enum amdgpu_sync_mode sync_mode, void *owner,
                                  bool intr)   [argument]
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/

amdgpu_object.c
    260  bp.resv = NULL;                       in amdgpu_bo_create_reserved()
    529  .resv = bp->resv,                     in amdgpu_bo_do_create()
    590  NULL, bp->resv, &amdgpu_bo_destroy);  in amdgpu_bo_do_create()
    606  r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);   in amdgpu_bo_do_create()
    615  if (!bp->resv)                        in amdgpu_bo_do_create()
    628  if (!bp->resv)                        in amdgpu_bo_do_create()
    629  dma_resv_unlock(bo->tbo.base.resv);   in amdgpu_bo_do_create()
    650  bp.resv = bo->tbo.base.resv;          in amdgpu_bo_create_shadow()
   1429  struct dma_resv *resv = bo->tbo.base.resv;   in amdgpu_bo_fence() [local]
   1451  amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
                                  enum amdgpu_sync_mode sync_mode, void *owner,
                                  bool intr)   [argument]
    [all...]
amdgpu_vm.c
    331  if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)    in amdgpu_vm_bo_base_init()
    621  if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)   in amdgpu_vm_del_from_lru_notify()
    883  bp->resv = vm->root.base.bo->tbo.base.resv;                  in amdgpu_vm_bo_param()
   1583  * @resv: fences we need to sync to
   1596  amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                     bool immediate, bool unlocked, struct dma_resv *resv,
                                     uint64_t start, uint64_t last, uint64_t flags,
                                     uint64_t addr, dma_addr_t *pages_addr,
                                     struct dma_fence **fence)   [argument]
   1598  bool unlocked, struct dma_resv *resv,                        in amdgpu_vm_bo_update_mapping()
   1637  r = vm->update_funcs->prepare(&params, resv, sync_mode);     in amdgpu_vm_bo_update_mapping()
   1656  * @resv …
   1671  amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_resv *resv,
                                    dma_addr_t *pages_addr, struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va_mapping *mapping, uint64_t flags,
                                    struct amdgpu_device *bo_adev, struct drm_mm_node *nodes,
                                    struct dma_fence **fence)   [argument]
   1788  struct dma_resv *resv;                                       in amdgpu_vm_bo_update() [local]
   1992  struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;     in amdgpu_vm_prt_fini() [local]
   2039  struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;     in amdgpu_vm_clear_freed() [local]
   2092  struct dma_resv *resv;                                       in amdgpu_vm_handle_moved() [local]
    [all...]
/kernel/linux/linux-5.10/include/drm/ttm/

ttm_bo_driver.h
     38  #include <linux/dma-resv.h>
    497  success = dma_resv_trylock(bo->base.resv);                  in ttm_bo_reserve()
    502  ret = dma_resv_lock_interruptible(bo->base.resv, ticket);   in ttm_bo_reserve()
    504  ret = dma_resv_lock(bo->base.resv, ticket);                 in ttm_bo_reserve()
    525  int ret = dma_resv_lock_slow_interruptible(bo->base.resv,   in ttm_bo_reserve_slowpath()
    531  dma_resv_lock_slow(bo->base.resv, ticket);                  in ttm_bo_reserve_slowpath()
    575  dma_resv_unlock(bo->base.resv);                             in ttm_bo_unreserve()
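ttm_bo_reserve() is a thin policy wrapper that picks between the three dma_resv lock flavors seen above; the slowpath variants then use the lock_slow calls after a ww-mutex deadlock backoff. A sketch of the fastpath branch structure only; the real helper also translates -EALREADY and warns on misuse:

    #include <drm/ttm/ttm_bo_api.h>
    #include <linux/dma-resv.h>

    static int bo_reserve(struct ttm_buffer_object *bo, bool interruptible,
                          bool no_wait, struct ww_acquire_ctx *ticket)
    {
            if (no_wait)
                    return dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;

            if (interruptible)
                    return dma_resv_lock_interruptible(bo->base.resv, ticket);

            return dma_resv_lock(bo->base.resv, ticket);
    }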
ttm_bo_api.h
     43  #include <linux/dma-resv.h>
    138  * Members protected by the bo::resv::reserved lock.
    202  * @resv: Reservation object to allow reserved evictions with.
    211  struct dma_resv *resv;    member
    370  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
    381  * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
    403  struct dma_resv *resv,
    422  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
    447  struct sg_table *sg, struct dma_resv *resv,
    618  dma_resv_assert_held(bo->base.resv);    in ttm_bo_pin()
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/radeon/

radeon_prime.c
     66  struct dma_resv *resv = attach->dmabuf->resv;   in radeon_gem_prime_import_sg_table() [local]
     71  dma_resv_lock(resv, NULL);                      in radeon_gem_prime_import_sg_table()
     73  RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);       in radeon_gem_prime_import_sg_table()
     74  dma_resv_unlock(resv);                          in radeon_gem_prime_import_sg_table()
radeon_benchmark.c
     35  radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                                  uint64_t saddr, uint64_t daddr, int flag,
                                  int n, struct dma_resv *resv)   [argument]
     38  struct dma_resv *resv)          in radeon_benchmark_do_move()
     51  resv);                          in radeon_benchmark_do_move()
     56  resv);                          in radeon_benchmark_do_move()
    125  dobj->tbo.base.resv);           in radeon_benchmark_move()
    136  dobj->tbo.base.resv);           in radeon_benchmark_move()
/kernel/linux/linux-6.6/kernel/irq/

affinity.c
    113  unsigned int resv = affd->pre_vectors + affd->post_vectors;   in irq_calc_affinity_vectors() [local]
    116  if (resv > minvec)                            in irq_calc_affinity_vectors()
    120  set_vecs = maxvec - resv;                     in irq_calc_affinity_vectors()
    127  return resv + min(set_vecs, maxvec - resv);   in irq_calc_affinity_vectors()
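These four hits are essentially the whole of irq_calc_affinity_vectors(): pre_vectors + post_vectors are reserved (exempt from CPU spreading), so the spreadable budget is maxvec - resv. A standalone restatement of the arithmetic, illustrative only (the in-tree function derives set_vecs from the affinity descriptor or the possible-CPU count):

    static unsigned int calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
                                              unsigned int pre, unsigned int post,
                                              unsigned int set_vecs)
    {
            unsigned int resv = pre + post;   /* vectors exempt from spreading */

            if (resv > minvec)
                    return 0;                 /* cannot even cover the reserved ones */

            return resv + (set_vecs < maxvec - resv ? set_vecs : maxvec - resv);
    }

    /* Example: minvec=4, maxvec=16, pre=1, post=1, set_vecs=32
     *          -> resv=2, spreadable budget 14, result 2 + min(32, 14) = 16. */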
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/

i915_gem_busy.c
    108  * !dma_resv_test_signaled_rcu(obj->resv, true);                     in i915_gem_busy_ioctl()
    113  seq = raw_read_seqcount(&obj->base.resv->seq);                      in i915_gem_busy_ioctl()
    117  busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));     in i915_gem_busy_ioctl()
    120  list = rcu_dereference(obj->base.resv->fence);                      in i915_gem_busy_ioctl()
    132  if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))   in i915_gem_busy_ioctl()
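This is a seqcount-protected lockless read of the 5.10-era dma_resv internals (an exclusive fence_excl plus a shared-fence list): sample resv->seq, dereference the fence pointers under RCU, and retry if the seqcount moved. A simplified sketch of the retry shape, checking only the exclusive fence; later kernels replaced this layout with dma_resv iterator helpers:

    #include <drm/drm_gem.h>
    #include <linux/dma-resv.h>
    #include <linux/seqlock.h>

    static bool resv_write_busy_lockless(struct drm_gem_object *obj)
    {
            struct dma_fence *excl;
            unsigned int seq;
            bool busy;

            rcu_read_lock();
    retry:
            seq = raw_read_seqcount(&obj->resv->seq);
            excl = rcu_dereference(obj->resv->fence_excl);
            busy = excl && !dma_fence_is_signaled(excl);
            /* a faithful version would also walk obj->resv->fence for readers */
            if (read_seqcount_retry(&obj->resv->seq, seq))
                    goto retry;
            rcu_read_unlock();

            return busy;
    }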
/kernel/linux/linux-5.10/net/sunrpc/auth_gss/

svcauth_gss.c
    691  svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)   [argument]
    695  if (resv->iov_len + 4 > PAGE_SIZE)           in svc_safe_putnetobj()
    697  svc_putnl(resv, o->len);                     in svc_safe_putnetobj()
    698  p = resv->iov_base + resv->iov_len;          in svc_safe_putnetobj()
    699  resv->iov_len += round_up_to_quad(o->len);   in svc_safe_putnetobj()
    700  if (resv->iov_len > PAGE_SIZE)               in svc_safe_putnetobj()
   1208  gss_write_resv(struct kvec *resv, size_t size_limit,   [argument]
   1212  if (resv->iov_len + 4 > size_limit)          in gss_write_resv()
   1214  svc_putnl(resv, RPC_SUCCESS);                in gss_write_resv()
   1238  struct kvec *resv = &rqstp->rq_res.head[0];  in svcauth_gss_legacy_init() [local]
   1349  struct kvec *resv = &rqstp->rq_res.head[0];  in svcauth_gss_proxy_init() [local]
   1536  struct kvec *resv = &rqstp->rq_res.head[0];  in svcauth_gss_accept() [local]
   1719  struct kvec *resv;                           in svcauth_gss_wrap_resp_integ() [local]
    [all...]
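Here resv is not a reservation object but the response head kvec of the RPC reply. svc_safe_putnetobj() appends an XDR netobj to it: a 4-byte length word, the opaque bytes, then iov_len padded up to a 4-byte (quad) boundary, all bounds-checked against the single head page. A simplified standalone sketch of that append, assuming the usual (len + 3) & ~3 quad round-up:

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uio.h>

    static inline u32 round_up_to_quad(u32 i)
    {
            return (i + 3) & ~3;
    }

    static int put_netobj(struct kvec *resv, const void *data, u32 len)
    {
            u8 *p;

            /* refuse anything that would run past the page-sized head buffer */
            if (resv->iov_len + 4 + round_up_to_quad(len) > PAGE_SIZE)
                    return -1;

            p = (u8 *)resv->iov_base + resv->iov_len;
            *(__be32 *)p = cpu_to_be32(len);   /* XDR length word */
            memcpy(p + 4, data, len);          /* opaque body; pad bytes follow */
            resv->iov_len += 4 + round_up_to_quad(len);
            return 0;
    }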
/kernel/linux/linux-6.6/drivers/gpu/drm/

drm_gem.c
    164  if (!obj->resv)                   in drm_gem_private_object_init()
    165  obj->resv = &obj->_resv;          in drm_gem_private_object_init()
    782  ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),   in drm_gem_dma_resv_wait()
   1182  dma_resv_assert_held(obj->resv);  in drm_gem_vmap()
   1199  dma_resv_assert_held(obj->resv);  in drm_gem_vunmap()
   1216  dma_resv_lock(obj->resv, NULL);   in drm_gem_vmap_unlocked()
   1218  dma_resv_unlock(obj->resv);       in drm_gem_vmap_unlocked()
   1226  dma_resv_lock(obj->resv, NULL);   in drm_gem_vunmap_unlocked()
   1228  dma_resv_unlock(obj->resv);       in drm_gem_vunmap_unlocked()
   1258  ret = dma_resv_lock_slow_interruptible(obj->resv,   in drm_gem_lock_reservations()
    [all...]
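The pairs at 1216/1218 and 1226/1228 are the _unlocked wrapper idiom: the locked primitives assert dma_resv_assert_held(), and the wrappers simply bracket them with the object's resv lock. A sketch mirroring drm_gem_vmap_unlocked() against the 6.6 signatures:

    #include <drm/drm_gem.h>
    #include <linux/dma-resv.h>
    #include <linux/iosys-map.h>

    static int gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
    {
            int ret;

            dma_resv_lock(obj->resv, NULL);
            ret = drm_gem_vmap(obj, map);   /* asserts obj->resv is held */
            dma_resv_unlock(obj->resv);

            return ret;
    }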
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/linux/

io_uring.h
    174  __u32 resv[3];    member
    203  __u32 resv;       member
    209  __u8  resv;       member
    216  __u16 resv;       member
    227  __u8  resv;       member
/kernel/linux/patches/linux-6.6/prebuilts/usr/include/linux/

io_uring.h
    174  __u32 resv[3];    member
    203  __u32 resv;       member
    209  __u8  resv;       member
    216  __u16 resv;       member
    227  __u8  resv;       member
/third_party/ltp/include/lapi/

io_uring.h
    185  uint64_t resv[2];    member
    205  uint32_t resv[3];    member
    236  uint32_t resv;       member
    244  uint8_t  resv;       member
    252  uint16_t resv;       member
/third_party/wpa_supplicant/wpa_supplicant-2.9/src/drivers/

wpa_hal.h
    123  uint8_t resv[3];    member
    140  uint8_t resv[3];    member
    147  int8_t  resv[2];    member
    153  uint8_t resv;       member
    181  int8_t  resv[3];    member
/third_party/wpa_supplicant/wpa_supplicant-2.9_standard/src/drivers/

wpa_hal.h
    123  uint8_t resv[3];    member
    140  uint8_t resv[3];    member
    147  int8_t  resv[2];    member
    153  uint8_t resv;       member
    181  int8_t  resv[3];    member
/kernel/linux/linux-5.10/mm/

hugetlb.c
    250  get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)   [argument]
    254  VM_BUG_ON(resv->region_cache_count <= 0);   in get_file_region_entry_from_cache()
    256  resv->region_cache_count--;                 in get_file_region_entry_from_cache()
    257  nrg = list_first_entry(&resv->region_cache, struct file_region, link);   in get_file_region_entry_from_cache()
    278  record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h,
                                             struct resv_map *resv, struct file_region *nrg)   [argument]
    280  struct resv_map *resv,                      in record_hugetlb_cgroup_uncharge_info()
    299  if (!resv->pages_per_hpage)                 in record_hugetlb_cgroup_uncharge_info()
    300  resv->pages_per_hpage = pages_per_huge_page(h);   in record_hugetlb_cgroup_uncharge_info()
    304  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));   in record_hugetlb_cgroup_uncharge_info()
    333  static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)   [argument]
    338  if (&prg->link != &resv …                   in coalesce_file_region()
    368  add_reservation_in_range(struct resv_map *resv, long f, long t,
                                  struct hugetlb_cgroup *h_cg, struct hstate *h,
                                  long *regions_needed)   [argument]
    514  region_add(struct resv_map *resv, long f, long t, long in_regions_needed,
                    struct hstate *h, struct hugetlb_cgroup *h_cg)   [argument]
    582  region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed)   [argument]
    618  region_abort(struct resv_map *resv, long f, long t, long regions_needed)   [argument]
    641  region_del(struct resv_map *resv, long f, long t)   [argument]
    768  region_count(struct resv_map *resv, long f, long t)   [argument]
   2200  struct resv_map *resv;                      in __vma_reservation_common() [local]
   3671  struct resv_map *resv = vma_resv_map(vma);  in hugetlb_vm_op_open() [local]
   3690  struct resv_map *resv = vma_resv_map(vma);  in hugetlb_vm_op_close() [local]
    [all...]
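The region_* helpers indexed here implement hugetlb's two-phase reservation accounting on a resv_map: region_chg() estimates how many file_region entries a [f, t) reservation needs and pre-populates the region cache (the get_file_region_entry_from_cache() hits), region_add() commits, and region_abort() rolls back a charge that will not be committed. An illustrative pseudo-caller only; these functions are static to mm/hugetlb.c, and accounting_fails() is a hypothetical stand-in for the quota/cgroup checks:

    static long reserve_range(struct resv_map *resv, long from, long to,
                              struct hstate *h)
    {
            long regions_needed, chg;

            chg = region_chg(resv, from, to, &regions_needed);   /* phase 1: estimate */
            if (chg < 0)
                    return chg;

            if (accounting_fails(chg)) {                          /* hypothetical check */
                    region_abort(resv, from, to, regions_needed); /* undo phase 1 */
                    return -ENOSPC;
            }

            return region_add(resv, from, to, regions_needed, h, NULL);   /* phase 2: commit */
    }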
/kernel/linux/linux-6.6/mm/

hugetlb.c
    437  get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)   [argument]
    441  VM_BUG_ON(resv->region_cache_count <= 0);   in get_file_region_entry_from_cache()
    443  resv->region_cache_count--;                 in get_file_region_entry_from_cache()
    444  nrg = list_first_entry(&resv->region_cache, struct file_region, link);   in get_file_region_entry_from_cache()
    465  record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h,
                                             struct resv_map *resv, struct file_region *nrg)   [argument]
    467  struct resv_map *resv,                      in record_hugetlb_cgroup_uncharge_info()
    486  if (!resv->pages_per_hpage)                 in record_hugetlb_cgroup_uncharge_info()
    487  resv->pages_per_hpage = pages_per_huge_page(h);   in record_hugetlb_cgroup_uncharge_info()
    491  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));   in record_hugetlb_cgroup_uncharge_info()
    519  static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)   [argument]
    524  if (&prg->link != &resv …                   in coalesce_file_region()
    572  add_reservation_in_range(struct resv_map *resv, long f, long t,
                                  struct hugetlb_cgroup *h_cg, struct hstate *h,
                                  long *regions_needed)   [argument]
    705  region_add(struct resv_map *resv, long f, long t, long in_regions_needed,
                    struct hstate *h, struct hugetlb_cgroup *h_cg)   [argument]
    772  region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed)   [argument]
    808  region_abort(struct resv_map *resv, long f, long t, long regions_needed)   [argument]
    831  region_del(struct resv_map *resv, long f, long t)   [argument]
    958  region_count(struct resv_map *resv, long f, long t)   [argument]
   2718  struct resv_map *resv;                      in __vma_reservation_common() [local]
   4840  struct resv_map *resv = vma_resv_map(vma);  in hugetlb_vm_op_open() [local]
   4879  struct resv_map *resv;                      in hugetlb_vm_op_close() [local]
    [all...]