Searched refs:objects (Results 1 - 25 of 141) sorted by relevance

/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/
i915_gem_region.c
18 mutex_lock(&mem->objects.lock); in i915_gem_object_init_memory_region()
19 list_add(&obj->mm.region_link, &mem->objects.list); in i915_gem_object_init_memory_region()
20 mutex_unlock(&mem->objects.lock); in i915_gem_object_init_memory_region()
27 mutex_lock(&mem->objects.lock); in i915_gem_object_release_memory_region()
29 mutex_unlock(&mem->objects.lock); in i915_gem_object_release_memory_region()
85 * the GTT, due to alignment restrictions. For such special objects, in __i915_gem_object_create_region()
87 * revisit this, either by allowing special mis-aligned objects in the in __i915_gem_object_create_region()
141 * i915_gem_process_region - Iterate over all objects of a region using ops
142 * to process and optionally skip objects
147 * checking whether to skip objects, an
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
i915_gem_evict.c
38 struct list_head *objects) in quirk_add()
40 /* quirk is only for live tiled objects, use it to declare ownership */ in quirk_add()
43 list_add(&obj->st_link, objects); in quirk_add()
46 static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects) in populate_ggtt() argument
69 quirk_add(obj, objects); in populate_ggtt()
76 pr_err("No objects on the GGTT inactive list!\n"); in populate_ggtt()
109 LIST_HEAD(objects); in igt_evict_something()
112 /* Fill the GGTT with pinned objects and try to evict one. */ in igt_evict_something()
114 err = populate_ggtt(ggtt, &objects); in igt_evict_something()
147 cleanup_objects(ggtt, &objects); in igt_evict_something()
37 quirk_add(struct drm_i915_gem_object *obj, struct list_head *objects) quirk_add() argument
intel_memory_region.c
28 struct list_head *objects) in close_objects()
33 list_for_each_entry_safe(obj, on, objects, st_link) { in close_objects()
55 LIST_HEAD(objects); in igt_mock_fill()
78 list_add(&obj->st_link, &objects); in igt_mock_fill()
94 close_objects(mem, &objects); in igt_mock_fill()
101 struct list_head *objects, in igt_object_create()
116 list_add(&obj->st_link, objects); in igt_object_create()
137 LIST_HEAD(objects); in igt_mock_contiguous()
148 obj = igt_object_create(mem, &objects, mem->mm.chunk_size, in igt_mock_contiguous()
162 obj = igt_object_create(mem, &objects, tota in igt_mock_contiguous()
27 close_objects(struct intel_memory_region *mem, struct list_head *objects) close_objects() argument
100 igt_object_create(struct intel_memory_region *mem, struct list_head *objects, u64 size, unsigned int flags) igt_object_create() argument
i915_gem_gtt.c
233 /* Keep creating larger objects until one cannot fit into the hole */ in lowlevel_hole()
266 * large objects without checking that we have sufficient in lowlevel_hole()
347 static void close_object_list(struct list_head *objects, in close_object_list() argument
353 list_for_each_entry_safe(obj, on, objects, st_link) { in close_object_list()
376 LIST_HEAD(objects); in fill_hole()
402 list_add(&obj->st_link, &objects); in fill_hole()
404 /* Align differing sized objects against the edges, and in fill_hole()
412 list_for_each_entry(obj, &objects, st_link) { in fill_hole()
449 list_for_each_entry(obj, &objects, st_link) { in fill_hole()
485 list_for_each_entry_reverse(obj, &objects, st_lin in fill_hole()
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/
i915_gem_evict.c
40 struct list_head *objects) in quirk_add()
42 /* quirk is only for live tiled objects, use it to declare ownership */ in quirk_add()
45 list_add(&obj->st_link, objects); in quirk_add()
48 static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects) in populate_ggtt() argument
71 quirk_add(obj, objects); in populate_ggtt()
78 pr_err("No objects on the GGTT inactive list!\n"); in populate_ggtt()
111 LIST_HEAD(objects); in igt_evict_something()
114 /* Fill the GGTT with pinned objects and try to evict one. */ in igt_evict_something()
116 err = populate_ggtt(ggtt, &objects); in igt_evict_something()
149 cleanup_objects(ggtt, &objects); in igt_evict_something()
39 quirk_add(struct drm_i915_gem_object *obj, struct list_head *objects) quirk_add() argument
intel_memory_region.c
33 struct list_head *objects) in close_objects()
38 list_for_each_entry_safe(obj, on, objects, st_link) { in close_objects()
62 LIST_HEAD(objects); in igt_mock_fill()
85 list_add(&obj->st_link, &objects); in igt_mock_fill()
101 close_objects(mem, &objects); in igt_mock_fill()
108 struct list_head *objects, in igt_object_create()
123 list_add(&obj->st_link, objects); in igt_object_create()
166 LIST_HEAD(objects); in igt_mock_reserve()
211 obj = igt_object_create(mem, &objects, size, 0); in igt_mock_reserve()
229 close_objects(mem, &objects); in igt_mock_reserve()
32 close_objects(struct intel_memory_region *mem, struct list_head *objects) close_objects() argument
107 igt_object_create(struct intel_memory_region *mem, struct list_head *objects, u64 size, unsigned int flags) igt_object_create() argument
i915_gem_gtt.c
258 /* Keep creating larger objects until one cannot fit into the hole */ in lowlevel_hole()
292 * large objects without checking that we have sufficient in lowlevel_hole()
390 static void close_object_list(struct list_head *objects, in close_object_list() argument
396 list_for_each_entry_safe(obj, on, objects, st_link) { in close_object_list()
421 LIST_HEAD(objects); in fill_hole()
447 list_add(&obj->st_link, &objects); in fill_hole()
449 /* Align differing sized objects against the edges, and in fill_hole()
457 list_for_each_entry(obj, &objects, st_link) { in fill_hole()
497 list_for_each_entry(obj, &objects, st_link) { in fill_hole()
536 list_for_each_entry_reverse(obj, &objects, st_lin in fill_hole()
/kernel/linux/linux-6.6/drivers/gpu/drm/
drm_exec.c
11 * multiple GEM objects while preparing hardware operations (e.g. command
15 * unlocks all previously locked GEM objects and locks the contended one first
16 * before locking any further objects.
53 /* Unlock all objects and drop references */
73 * Initialize the object and make sure that we can track locked objects.
78 exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL); in drm_exec_init()
81 exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0; in drm_exec_init()
92 * Unlock all locked objects, drop the references to objects and free all memory
98 kvfree(exec->objects); in drm_exec_fini()
315 drm_exec_prepare_array(struct drm_exec *exec, struct drm_gem_object **objects, unsigned int num_objects, unsigned int num_fences) drm_exec_prepare_array() argument
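
The drm_exec.c comments above describe a lock-all-or-back-off helper: on contention it drops every GEM object lock taken so far, takes the contended object first, and retries. A minimal usage sketch, assuming the linux-6.6 drm_exec API quoted here (my_objs and my_obj_count are hypothetical driver state, not names from the source):

	#include <drm/drm_exec.h>

	static int lock_all_objects(struct drm_gem_object **my_objs,
				    unsigned int my_obj_count)
	{
		struct drm_exec exec;
		unsigned int i;
		int ret = 0;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
		drm_exec_until_all_locked(&exec) {
			for (i = 0; i < my_obj_count; i++) {
				ret = drm_exec_prepare_obj(&exec, my_objs[i], 1);
				/* Backs off and restarts the loop on contention. */
				drm_exec_retry_on_contention(&exec);
				if (ret)
					goto out;
			}
		}
		/* All objects are locked here; queue the hardware operation. */
	out:
		drm_exec_fini(&exec);
		return ret;
	}
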
drm_lease.c
31 * - An 'owner' is a &struct drm_master that is not leasing objects from
32 * another &struct drm_master, and hence 'owns' the objects. The owner can be
35 * - A 'lessor' is a &struct drm_master which is leasing objects to one or more
39 * - A 'lessee' is a &struct drm_master which is leasing objects from some
41 * lessor recorded in &drm_master.lessor, and holds the set of objects that
49 * The set of objects any &struct drm_master 'controls' is limited to the set
50 * of objects it leases (for lessees) or all objects (for owners).
58 * Since each lessee may lease objects from a single lessor, display resource
65 * objects fro
346 validate_lease(struct drm_device *dev, int object_count, struct drm_mode_object **objects, bool universal_planes) validate_lease() argument
384 struct drm_mode_object **objects; fill_object_idr() local
/kernel/linux/linux-5.10/scripts/
link-vmlinux.sh
6 # vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_OBJS) and
75 local objects
78 objects="--whole-archive \
101 ${LD} ${KBUILD_LDFLAGS} -r -o ${1} ${lds} ${objects}
140 local objects
157 objects="--whole-archive \
162 objects="--whole-archive \
174 -T ${lds} ${objects}
176 objects="-Wl,--whole-archive \
188 ${objects} \
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_lease.c
211 * drm_lease_create - create a new drm_master with leased objects (idr_mutex not held)
212 * @lessor: lease holder (or owner) of objects
213 * @leases: objects to lease to the new drm_master
216 * make sure all of the desired objects can be leased, atomically
326 * _drm_lease_revoke - revoke access to all leased objects (idr_mutex held)
368 * drm_lease_revoke - revoke access to all leased objects (idr_mutex not held)
380 struct drm_mode_object **objects, in validate_lease()
392 if (objects[o]->type == DRM_MODE_OBJECT_CRTC && has_crtc == -1) { in validate_lease()
395 if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1) in validate_lease()
399 if (objects[ in validate_lease()
378 validate_lease(struct drm_device *dev, int object_count, struct drm_mode_object **objects, bool universal_planes) validate_lease() argument
416 struct drm_mode_object **objects; fill_object_idr() local
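
The validate_lease() hits in both trees suggest the rule being enforced: a lease is valid only if it names at least one CRTC and one connector (plus a plane when universal_planes is set). A simplified sketch of that check, not the verbatim kernel code (the plane handling is elided):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <drm/drm_mode_object.h>

	static int validate_lease_sketch(int object_count,
					 struct drm_mode_object **objects)
	{
		bool has_crtc = false, has_connector = false;
		int o;

		for (o = 0; o < object_count; o++) {
			if (objects[o]->type == DRM_MODE_OBJECT_CRTC)
				has_crtc = true;
			if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR)
				has_connector = true;
		}
		/* A lease that cannot drive an output is rejected. */
		return (has_crtc && has_connector) ? 0 : -EINVAL;
	}
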
/kernel/linux/linux-6.6/drivers/iommu/iommufd/
main.c
5 * iommufd provides control over the IOMMU HW objects created by IOMMU kernel
6 * drivers. IOMMU HW objects revolve around IO page tables that map incoming DMA
62 rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, in _iommufd_object_alloc()
76 * destruction. Except for special kernel-only objects there is no in-kernel way
77 * to reliably destroy a single object. Thus all APIs that are creating objects
86 old = xa_store(&ictx->objects, obj->id, obj, GFP_KERNEL); in iommufd_object_finalize()
96 old = xa_erase(&ictx->objects, obj->id); in iommufd_object_abort()
123 xa_lock(&ictx->objects); in iommufd_get_object()
124 obj = xa_load(&ictx->objects, id); in iommufd_get_object()
128 xa_unlock(&ictx->objects); in iommufd_get_object()
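
The main.c hits above show iommufd's two-phase object lifetime: _iommufd_object_alloc() reserves an ID by storing XA_ZERO_ENTRY (lookups still read NULL), iommufd_object_finalize() publishes the object with xa_store(), and iommufd_object_abort() drops the reservation with xa_erase(). A generic sketch of that reserve-then-publish pattern; struct my_obj and the function names are hypothetical:

	#include <linux/bug.h>
	#include <linux/xarray.h>

	struct my_obj {
		u32 id;
		/* ... payload ... */
	};

	static int my_obj_reserve(struct xarray *xa, struct my_obj *obj)
	{
		/* Allocate an ID but keep the slot hidden from xa_load(). */
		return xa_alloc(xa, &obj->id, XA_ZERO_ENTRY,
				xa_limit_31b, GFP_KERNEL);
	}

	static void my_obj_publish(struct xarray *xa, struct my_obj *obj)
	{
		/* Replace the reserved entry; the old zero entry reads as NULL. */
		void *old = xa_store(xa, obj->id, obj, GFP_KERNEL);

		WARN_ON(old);
	}
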
vfio_compat.c
19 xa_lock(&ictx->objects); in get_compat_ioas()
24 xa_unlock(&ictx->objects); in get_compat_ioas()
59 xa_lock(&ictx->objects); in iommufd_vfio_compat_set_no_iommu()
66 xa_unlock(&ictx->objects); in iommufd_vfio_compat_set_no_iommu()
89 xa_lock(&ictx->objects); in iommufd_vfio_compat_ioas_create()
105 xa_unlock(&ictx->objects); in iommufd_vfio_compat_ioas_create()
117 xa_unlock(&ictx->objects); in iommufd_vfio_compat_ioas_create()
143 xa_lock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
145 xa_unlock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
150 xa_lock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/
i915_gem_region.c
115 mutex_lock(&mem->objects.lock); in i915_gem_object_init_memory_region()
118 list_add(&obj->mm.region_link, &mem->objects.purgeable); in i915_gem_object_init_memory_region()
120 list_add(&obj->mm.region_link, &mem->objects.list); in i915_gem_object_init_memory_region()
122 mutex_unlock(&mem->objects.lock); in i915_gem_object_init_memory_region()
129 mutex_lock(&mem->objects.lock); in i915_gem_object_release_memory_region()
131 mutex_unlock(&mem->objects.lock); in i915_gem_object_release_memory_region()
/kernel/linux/linux-5.10/scripts/kconfig/
streamline_config.pl
145 my %objects;
295 # Read all Makefiles to map the configs to the objects
319 # collect objects after obj-$(CONFIG_FOO_BAR)
336 if (defined($objects{$1})) {
337 @arr = @{$objects{$1}};
342 # The objects have a hash mapping to a reference
344 $objects{$1} = \@arr;
399 if (defined($objects{$module})) {
400 my @arr = @{$objects{$module}};
691 if (defined($objects{
/kernel/linux/linux-6.6/scripts/kconfig/
streamline_config.pl
145 my %objects;
295 # Read all Makefiles to map the configs to the objects
319 # collect objects after obj-$(CONFIG_FOO_BAR)
336 if (defined($objects{$1})) {
337 @arr = @{$objects{$1}};
342 # The objects have a hash mapping to a reference
344 $objects{$1} = \@arr;
399 if (defined($objects{$module})) {
400 my @arr = @{$objects{$module}};
691 if (defined($objects{
/kernel/linux/linux-5.10/tools/vm/
slabinfo.c
36 unsigned long partial, objects, slabs, objects_partial, objects_total; member
360 return slab_size(s) - s->objects * s->object_size; in slab_waste()
536 s->name, s->aliases, s->order, s->objects); in report()
553 onoff(s->red_zone), s->objects * s->object_size); in report()
556 s->slabs * (page_size << s->order) - s->objects * s->object_size); in report()
559 (s->slab_size - s->object_size) * s->objects); in report()
633 s->name, s->objects, in slabcache()
641 s->name, s->objects, s->object_size, size_str, dist_str, in slabcache()
644 s->slabs ? (s->objects * s->object_size * 100) / in slabcache()
704 if (s->objects > in slab_empty()
/kernel/linux/linux-6.6/tools/mm/
slabinfo.c
36 unsigned long partial, objects, slabs, objects_partial, objects_total; member
380 return slab_size(s) - s->objects * s->object_size; in slab_waste()
560 s->name, s->aliases, s->order, s->objects); in report()
577 onoff(s->red_zone), s->objects * s->object_size); in report()
580 s->slabs * (page_size << s->order) - s->objects * s->object_size); in report()
583 (s->slab_size - s->object_size) * s->objects); in report()
657 s->name, s->objects, in slabcache()
665 s->name, s->objects, s->object_size, size_str, dist_str, in slabcache()
668 s->slabs ? (s->objects * s->object_size * 100) / in slabcache()
728 if (s->objects > in slab_empty()
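
Both slabinfo.c variants compute waste the same way: the memory backing a cache's slabs minus the bytes its objects actually occupy (slab_size(s) presumably expands to s->slabs * (page_size << s->order), matching the report() hit above). A worked example with made-up numbers:

	/* 10 order-0 slabs on 4 KiB pages holding 90 objects of 448 bytes. */
	unsigned long slab_bytes = 10 * (4096UL << 0);	/* 40960 */
	unsigned long used_bytes = 90UL * 448;		/* 40320 */
	unsigned long waste = slab_bytes - used_bytes;	/*   640 */
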
/kernel/linux/linux-6.6/include/drm/
drm_exec.h
29 * @num_objects: number of objects locked
34 * @max_objects: maximum objects in array
39 * @objects: array of the locked objects
41 struct drm_gem_object **objects; member
60 * index is within the number of locked objects. NULL otherwise.
65 return index < exec->num_objects ? exec->objects[index] : NULL; in drm_exec_obj()
69 * drm_exec_for_each_locked_object - iterate over all the locked objects
74 * Iterate over all the locked GEM objects inside the drm_exec object.
81 * objects i
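
The drm_exec.h hits show drm_exec_obj() bounds-checking its index and returning NULL past num_objects; drm_exec_for_each_locked_object() iterates the same array. A short sketch of walking the locked set after the locking loop sketched earlier (exec comes from that sketch; fence is an assumed, already-created struct dma_fence, and <linux/dma-resv.h> is needed for dma_resv_add_fence()):

	unsigned long index;
	struct drm_gem_object *obj;

	drm_exec_for_each_locked_object(&exec, index, obj) {
		/* For example, publish a fence on every locked object. */
		dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_WRITE);
	}
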
/kernel/linux/linux-5.10/lib/
test_meminit.c
131 /* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
236 * can touch objects after they're freed. in do_kmem_cache_size()
324 void *objects[10]; in do_kmem_cache_size_bulk() local
328 num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects), in do_kmem_cache_size_bulk()
329 objects); in do_kmem_cache_size_bulk()
331 bytes = count_nonzero_bytes(objects[i], size); in do_kmem_cache_size_bulk()
334 fill_with_garbage(objects[i], size); in do_kmem_cache_size_bulk()
338 kmem_cache_free_bulk(c, num, objects); in do_kmem_cache_size_bulk()
/kernel/linux/linux-6.6/lib/
test_meminit.c
152 /* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
257 * can touch objects after they're freed. in do_kmem_cache_size()
353 void *objects[10]; in do_kmem_cache_size_bulk() local
357 num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects), in do_kmem_cache_size_bulk()
358 objects); in do_kmem_cache_size_bulk()
360 bytes = count_nonzero_bytes(objects[i], size); in do_kmem_cache_size_bulk()
363 fill_with_garbage(objects[i], size); in do_kmem_cache_size_bulk()
367 kmem_cache_free_bulk(c, num, objects); in do_kmem_cache_size_bulk()
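
Both test_meminit.c variants exercise the same bulk slab API: kmem_cache_alloc_bulk() returns how many objects it actually allocated (possibly fewer than requested, 0 on failure) and kmem_cache_free_bulk() releases exactly that many. A minimal sketch with a throwaway cache; the cache name and object size are illustrative:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static void bulk_sketch(void)
	{
		struct kmem_cache *c;
		void *objects[10];
		int num;

		c = kmem_cache_create("bulk_sketch", 64, 0, 0, NULL);
		if (!c)
			return;

		/* num may be less than ARRAY_SIZE(objects). */
		num = kmem_cache_alloc_bulk(c, GFP_KERNEL,
					    ARRAY_SIZE(objects), objects);
		if (num)
			kmem_cache_free_bulk(c, num, objects);
		kmem_cache_destroy(c);
	}
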
/kernel/linux/linux-6.6/mm/kfence/
kfence_test.c
293 * even for KFENCE objects; these are required so that in test_alloc()
531 /* Ensure that SL*B does not modify KFENCE objects on bulk free. */
539 void *objects[] = { in test_free_bulk() local
547 kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects); in test_free_bulk()
731 /* Test that some objects from a bulk allocation belong to KFENCE pool. */
746 void *objects[100]; in test_memcache_alloc_bulk() local
747 int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), in test_memcache_alloc_bulk()
748 objects); in test_memcache_alloc_bulk()
751 for (i = 0; i < ARRAY_SIZE(objects); in test_memcache_alloc_bulk()
/kernel/linux/linux-6.6/mm/
slub.c
4 * objects in per cpu and per node lists.
72 * A. slab->freelist -> List of free objects in a slab
73 * B. slab->inuse -> Number of objects in use
74 * C. slab->objects -> Number of objects in slab
82 * processors may put objects onto the freelist but the processor that
83 * froze the slab is the only one that can retrieve the objects from the
97 * allocating a long series of objects that fill up slabs does not require
142 * cannot scan all objects.
161 * free objects i
1381 inc_slabs_node(struct kmem_cache *s, int node, int objects) inc_slabs_node() argument
1396 dec_slabs_node(struct kmem_cache *s, int node, int objects) dec_slabs_node() argument
1748 inc_slabs_node(struct kmem_cache *s, int node, int objects) inc_slabs_node() argument
1750 dec_slabs_node(struct kmem_cache *s, int node, int objects) dec_slabs_node() argument
5596 unsigned int objects; cpu_partial_store() local
5645 int objects = 0; slabs_cpu_partial_show() local
5735 SLAB_ATTR_RO(objects); global() variable
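
The slub.c header comment enumerates the per-slab metadata it relies on: A the freelist pointer, B the in-use count, C the object count. In the 6.6 tree these live in struct slab (mm/slab.h); a simplified sketch of just those fields, omitting the rest of the real structure:

	struct slab_sketch {
		void *freelist;			/* A: first free object */
		union {
			unsigned long counters;	/* freelist+counters change together */
			struct {
				unsigned inuse:16;	/* B: objects in use */
				unsigned objects:15;	/* C: total objects */
				unsigned frozen:1;	/* cpu-owned, exempt from lists */
			};
		};
	};
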
/kernel/linux/linux-5.10/mm/
slub.c
4 * objects in per cpu and per node lists.
57 * B. page->inuse -> Number of objects in use
58 * C. page->objects -> Number of objects in page
64 * processors may put objects onto the freelist but the processor that
65 * froze the slab is the only one that can retrieve the objects from the
77 * allocating a long series of objects that fill up slabs does not require
91 * cannot scan all objects.
110 * free objects in addition to the regular freelist
171 * sort the partial list by the number of objects i
1081 inc_slabs_node(struct kmem_cache *s, int node, int objects) inc_slabs_node() argument
1096 dec_slabs_node(struct kmem_cache *s, int node, int objects) dec_slabs_node() argument
1488 inc_slabs_node(struct kmem_cache *s, int node, int objects) inc_slabs_node() argument
1490 dec_slabs_node(struct kmem_cache *s, int node, int objects) dec_slabs_node() argument
1910 acquire_slab(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page, int mode, int *objects) acquire_slab() argument
1962 int objects; get_partial_node() local
5086 unsigned int objects; cpu_partial_store() local
5131 SLAB_ATTR_RO(objects); global() variable
5141 int objects = 0; slabs_cpu_partial_show() local
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/
intel_region_ttm.c
117 * objects placed in this region. in intel_region_ttm_fini()
122 /* Flush objects from region. */ in intel_region_ttm_fini()
126 mutex_lock(&mem->objects.lock); in intel_region_ttm_fini()
127 if (list_empty(&mem->objects.list)) in intel_region_ttm_fini()
129 mutex_unlock(&mem->objects.lock); in intel_region_ttm_fini()
137 /* If we leaked objects, don't free the region, causing a use-after-free */ in intel_region_ttm_fini()
