
Searched refs:batch (Results 1 - 25 of 245) sorted by relevance


/kernel/linux/linux-6.6/mm/
mmu_gather.c
20 struct mmu_gather_batch *batch; in tlb_next_batch() local
26 batch = tlb->active; in tlb_next_batch()
27 if (batch->next) { in tlb_next_batch()
28 tlb->active = batch->next; in tlb_next_batch()
35 batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); in tlb_next_batch()
36 if (!batch) in tlb_next_batch()
40 batch->next = NULL; in tlb_next_batch()
41 batch->nr = 0; in tlb_next_batch()
42 batch->max = MAX_GATHER_BATCH; in tlb_next_batch()
44 tlb->active->next = batch; in tlb_next_batch()
51 tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma) tlb_flush_rmap_batch() argument
87 struct mmu_gather_batch *batch; tlb_batch_pages_flush() local
110 struct mmu_gather_batch *batch, *next; tlb_batch_list_free() local
121 struct mmu_gather_batch *batch; __tlb_remove_page_size() local
149 __tlb_remove_table_free(struct mmu_table_batch *batch) __tlb_remove_table_free() argument
212 tlb_remove_table_free(struct mmu_table_batch *batch) tlb_remove_table_free() argument
219 tlb_remove_table_free(struct mmu_table_batch *batch) tlb_remove_table_free() argument
249 struct mmu_table_batch **batch = &tlb->batch; tlb_table_flush() local
260 struct mmu_table_batch **batch = &tlb->batch; tlb_remove_table() local
[all...]
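
For orientation, the mmu_gather.c hits above all revolve around one idiom: pages queued for deferred freeing are collected into page-sized batches chained off tlb->active, and a new batch is allocated only when the current one fills. Below is a minimal userspace sketch of that idiom; the types, field names, and sizes are simplified stand-ins (malloc in place of __get_free_page, an arbitrary MAX_GATHER_BATCH), not the kernel's definitions.

/*
 * Minimal userspace sketch of the mmu_gather batching idiom
 * (simplified stand-ins, not the kernel's definitions).
 */
#include <stdbool.h>
#include <stdlib.h>

#define MAX_GATHER_BATCH 32   /* illustrative; the kernel derives this from PAGE_SIZE */

struct gather_batch {
	struct gather_batch *next;
	unsigned int nr;
	unsigned int max;
	void *pages[MAX_GATHER_BATCH];
};

struct gather {
	struct gather_batch *active;
	struct gather_batch local;   /* first batch is embedded, as in struct mmu_gather */
};

static void gather_init(struct gather *tlb)
{
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = MAX_GATHER_BATCH;
	tlb->active = &tlb->local;
}

/* Mirrors tlb_next_batch(): reuse a chained batch or allocate a fresh one. */
static bool gather_next_batch(struct gather *tlb)
{
	struct gather_batch *batch = tlb->active;

	if (batch->next) {               /* a flush step (not shown) resets nr before reuse */
		tlb->active = batch->next;
		return true;
	}

	batch = calloc(1, sizeof(*batch));   /* kernel: __get_free_page(GFP_NOWAIT | __GFP_NOWARN) */
	if (!batch)
		return false;

	batch->max = MAX_GATHER_BATCH;
	tlb->active->next = batch;
	tlb->active = batch;
	return true;
}

/* Queue one page; a later flush walks the chain and frees everything at once. */
static bool gather_add_page(struct gather *tlb, void *page)
{
	if (tlb->active->nr == tlb->active->max && !gather_next_batch(tlb))
		return false;
	tlb->active->pages[tlb->active->nr++] = page;
	return true;
}
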
/kernel/linux/linux-5.10/mm/
mmu_gather.c
18 struct mmu_gather_batch *batch; in tlb_next_batch() local
20 batch = tlb->active; in tlb_next_batch()
21 if (batch->next) { in tlb_next_batch()
22 tlb->active = batch->next; in tlb_next_batch()
29 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); in tlb_next_batch()
30 if (!batch) in tlb_next_batch()
34 batch->next = NULL; in tlb_next_batch()
35 batch->nr = 0; in tlb_next_batch()
36 batch->max = MAX_GATHER_BATCH; in tlb_next_batch()
38 tlb->active->next = batch; in tlb_next_batch()
46 struct mmu_gather_batch *batch; tlb_batch_pages_flush() local
57 struct mmu_gather_batch *batch, *next; tlb_batch_list_free() local
68 struct mmu_gather_batch *batch; __tlb_remove_page_size() local
96 __tlb_remove_table_free(struct mmu_table_batch *batch) __tlb_remove_table_free() argument
159 tlb_remove_table_free(struct mmu_table_batch *batch) tlb_remove_table_free() argument
166 tlb_remove_table_free(struct mmu_table_batch *batch) tlb_remove_table_free() argument
196 struct mmu_table_batch **batch = &tlb->batch; tlb_table_flush() local
207 struct mmu_table_batch **batch = &tlb->batch; tlb_remove_table() local
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/
igt_spinner.c
97 if (!spin->batch) { in igt_spinner_pin()
105 spin->batch = vaddr; in igt_spinner_pin()
131 u32 *batch; in igt_spinner_create_request() local
139 if (!spin->batch) { in igt_spinner_create_request()
160 batch = spin->batch; in igt_spinner_create_request()
163 *batch++ = MI_STORE_DWORD_IMM_GEN4; in igt_spinner_create_request()
164 *batch++ = lower_32_bits(hws_address(hws, rq)); in igt_spinner_create_request()
165 *batch++ = upper_32_bits(hws_address(hws, rq)); in igt_spinner_create_request()
167 *batch in igt_spinner_create_request()
[all...]
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/
hash_tlb.c
37 * immediately or will batch it up if the current CPU has an active
38 * batch on it.
44 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); in hpte_need_flush() local
51 i = batch->index; in hpte_need_flush()
100 * Check if we have an active batch on this CPU. If not, just in hpte_need_flush()
103 if (!batch->active) { in hpte_need_flush()
110 * This can happen when we are in the middle of a TLB batch and in hpte_need_flush()
113 * up scanning and resetting referenced bits then our batch context in hpte_need_flush()
117 * batch in hpte_need_flush()
119 if (i != 0 && (mm != batch in hpte_need_flush()
144 __flush_tlb_pending(struct ppc64_tlb_batch *batch) __flush_tlb_pending() argument
[all...]
/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/
hash_tlb.c
37 * immediately or will batch it up if the current CPU has an active
38 * batch on it.
44 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); in hpte_need_flush() local
51 i = batch->index; in hpte_need_flush()
100 * Check if we have an active batch on this CPU. If not, just in hpte_need_flush()
103 if (!batch->active) { in hpte_need_flush()
110 * This can happen when we are in the middle of a TLB batch and in hpte_need_flush()
113 * up scanning and resetting referenced bits then our batch context in hpte_need_flush()
117 * batch in hpte_need_flush()
119 if (i != 0 && (mm != batch in hpte_need_flush()
144 __flush_tlb_pending(struct ppc64_tlb_batch *batch) __flush_tlb_pending() argument
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/
igt_spinner.c
47 spin->batch = vaddr; in igt_spinner_init()
97 u32 *batch; in igt_spinner_create_request() local
135 batch = spin->batch; in igt_spinner_create_request()
138 *batch++ = MI_STORE_DWORD_IMM_GEN4; in igt_spinner_create_request()
139 *batch++ = lower_32_bits(hws_address(hws, rq)); in igt_spinner_create_request()
140 *batch++ = upper_32_bits(hws_address(hws, rq)); in igt_spinner_create_request()
142 *batch++ = MI_STORE_DWORD_IMM_GEN4; in igt_spinner_create_request()
143 *batch++ = 0; in igt_spinner_create_request()
144 *batch in igt_spinner_create_request()
[all...]
/kernel/linux/linux-6.6/drivers/iommu/iommufd/
pages.c
275 static void batch_clear(struct pfn_batch *batch) in batch_clear() argument
277 batch->total_pfns = 0; in batch_clear()
278 batch->end = 0; in batch_clear()
279 batch->pfns[0] = 0; in batch_clear()
280 batch->npfns[0] = 0; in batch_clear()
285 * batch
287 static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns) in batch_clear_carry() argument
290 return batch_clear(batch); in batch_clear_carry()
293 WARN_ON(!batch->end || in batch_clear_carry()
294 batch in batch_clear_carry()
303 batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns) batch_skip_carry() argument
315 __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len) __batch_init() argument
332 batch_init(struct pfn_batch *batch, size_t max_pages) batch_init() argument
337 batch_init_backup(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len) batch_init_backup() argument
343 batch_destroy(struct pfn_batch *batch, void *backup) batch_destroy() argument
350 batch_add_pfn(struct pfn_batch *batch, unsigned long pfn) batch_add_pfn() argument
375 batch_from_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index) batch_from_domain() argument
426 batch_from_domain_continue(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index) batch_from_domain_continue() argument
475 batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index) batch_to_domain() argument
519 batch_from_xarray(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index) batch_from_xarray() argument
540 batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index) batch_from_xarray_clear() argument
615 batch_from_pages(struct pfn_batch *batch, struct page **pages, size_t npages) batch_from_pages() argument
625 batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, unsigned int first_page_off, size_t npages) batch_unpin() argument
666 batch_rw(struct pfn_batch *batch, void *data, unsigned long offset, unsigned long length, unsigned int flags) batch_rw() argument
939 struct pfn_batch batch; global() member
1197 iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index, unsigned long *unmapped_end_index, unsigned long real_last_index) iopt_area_unpin_domain() argument
1266 struct pfn_batch batch; __iopt_area_unfill_domain() local
1519 iopt_pages_unpin_xarray(struct pfn_batch *batch, struct iopt_pages *pages, unsigned long start_index, unsigned long end_index) iopt_pages_unpin_xarray() argument
1548 struct pfn_batch batch; iopt_pages_unfill_xarray() local
[all...]
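
The pages.c hits show iommufd's pfn_batch, which coalesces physically contiguous pfns into (start, count) runs so long pinned ranges occupy only a few slots before being flushed to an IOMMU domain. A rough userspace model of the batch_clear()/batch_add_pfn() pair follows; the field names mirror the snippet, but the capacity and the _sketch-suffixed helpers are invented for illustration.

/*
 * Rough userspace model of the pfn_batch run-coalescing idiom
 * (invented capacity and helper names, not the iommufd code).
 */
#include <stdbool.h>
#include <stddef.h>

#define BATCH_RUNS 16                    /* illustrative capacity, not the kernel's sizing */

struct pfn_batch_sketch {
	unsigned long pfns[BATCH_RUNS];  /* start pfn of each run */
	unsigned int npfns[BATCH_RUNS];  /* length of each run */
	unsigned int end;                /* number of runs in use */
	size_t total_pfns;
};

static void batch_clear_sketch(struct pfn_batch_sketch *batch)
{
	batch->total_pfns = 0;
	batch->end = 0;
	batch->pfns[0] = 0;
	batch->npfns[0] = 0;
}

/* Append one pfn, extending the last run when it is physically contiguous. */
static bool batch_add_pfn_sketch(struct pfn_batch_sketch *batch, unsigned long pfn)
{
	if (batch->end &&
	    pfn == batch->pfns[batch->end - 1] + batch->npfns[batch->end - 1]) {
		batch->npfns[batch->end - 1]++;
	} else {
		if (batch->end == BATCH_RUNS)
			return false;    /* full: caller flushes to the domain and retries */
		batch->pfns[batch->end] = pfn;
		batch->npfns[batch->end] = 1;
		batch->end++;
	}
	batch->total_pfns++;
	return true;
}
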
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/
i915_gem_object_blt.c
23 struct i915_vma *batch; in intel_emit_vma_fill_blt() local
47 batch = i915_vma_instance(pool->obj, ce->vm, NULL); in intel_emit_vma_fill_blt()
48 if (IS_ERR(batch)) { in intel_emit_vma_fill_blt()
49 err = PTR_ERR(batch); in intel_emit_vma_fill_blt()
53 err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER); in intel_emit_vma_fill_blt()
102 batch->private = pool; in intel_emit_vma_fill_blt()
103 return batch; in intel_emit_vma_fill_blt()
106 i915_vma_unpin(batch); in intel_emit_vma_fill_blt()
151 struct i915_vma *batch; in i915_gem_object_fill_blt() local
174 batch in i915_gem_object_fill_blt()
245 struct i915_vma *batch; intel_emit_vma_copy_blt() local
356 struct i915_vma *vma[2], *batch; i915_gem_object_copy_blt() local
[all...]
i915_gem_execbuffer.c
149 * Any render targets written to in the batch must be flagged with
220 * Before any batch is given extra privileges we first must check that it
248 struct eb_vma *batch; /** identity of the batch obj/vma */ member
289 u64 batch_len; /** Length of batch within object */
290 u32 batch_start_offset; /** Location within object of batch */
292 struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
553 * SNA is doing fancy tricks with compressing batch buffers, which leads in eb_add_vma()
555 * relocate address is still positive, except when the batch is placed in eb_add_vma()
568 eb->batch in eb_add_vma()
834 unsigned int batch = eb_batch_index(eb); eb_lookup_vmas() local
1274 struct i915_vma *batch; __reloc_gpu_alloc() local
1456 u32 *batch; __reloc_entry_gpu() local
2289 struct i915_vma *shadow, *trampoline, *batch; eb_parse() local
2395 eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) eb_submit() argument
3034 struct i915_vma *batch; i915_gem_do_execbuffer() local
[all...]
/kernel/linux/linux-5.10/arch/powerpc/include/asm/book3s/64/
tlbflush-hash.h
25 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
31 struct ppc64_tlb_batch *batch; in arch_enter_lazy_mmu_mode() local
35 batch = this_cpu_ptr(&ppc64_tlb_batch); in arch_enter_lazy_mmu_mode()
36 batch->active = 1; in arch_enter_lazy_mmu_mode()
41 struct ppc64_tlb_batch *batch; in arch_leave_lazy_mmu_mode() local
45 batch = this_cpu_ptr(&ppc64_tlb_batch); in arch_leave_lazy_mmu_mode()
47 if (batch->index) in arch_leave_lazy_mmu_mode()
48 __flush_tlb_pending(batch); in arch_leave_lazy_mmu_mode()
49 batch->active = 0; in arch_leave_lazy_mmu_mode()
/kernel/linux/linux-6.6/arch/powerpc/include/asm/book3s/64/
tlbflush-hash.h
25 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
31 struct ppc64_tlb_batch *batch; in arch_enter_lazy_mmu_mode() local
40 batch = this_cpu_ptr(&ppc64_tlb_batch); in arch_enter_lazy_mmu_mode()
41 batch->active = 1; in arch_enter_lazy_mmu_mode()
46 struct ppc64_tlb_batch *batch; in arch_leave_lazy_mmu_mode() local
50 batch = this_cpu_ptr(&ppc64_tlb_batch); in arch_leave_lazy_mmu_mode()
52 if (batch->index) in arch_leave_lazy_mmu_mode()
53 __flush_tlb_pending(batch); in arch_leave_lazy_mmu_mode()
54 batch->active = 0; in arch_leave_lazy_mmu_mode()
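
Taken together, the hash_tlb.c and tlbflush-hash.h hits describe the ppc64 lazy-MMU flow: arch_enter_lazy_mmu_mode() marks the per-CPU batch active, hpte_need_flush()-style code either flushes immediately or queues the address, and arch_leave_lazy_mmu_mode() flushes anything still pending. Here is a single-threaded sketch of that flow with the flush stubbed out; the batch size and all _sketch names are assumptions, not the kernel's definitions.

/*
 * Single-threaded sketch of the ppc64 lazy-MMU TLB batching flow
 * (flush stubbed out; sizes and names are assumptions).
 */
#include <stdio.h>

#define TLB_BATCH_SIZE 192               /* stands in for PPC64_TLB_BATCH_NR; value assumed */

struct tlb_batch_sketch {
	int active;
	unsigned int index;
	unsigned long vaddrs[TLB_BATCH_SIZE];
};

static struct tlb_batch_sketch the_batch;   /* per-CPU in the kernel */

static void flush_tlb_pending_sketch(struct tlb_batch_sketch *batch)
{
	/* Stand-in for __flush_tlb_pending(): one flush covers all queued entries. */
	printf("flushing %u queued translations\n", batch->index);
	batch->index = 0;
}

static void enter_lazy_mmu_sketch(void)
{
	the_batch.active = 1;
}

/* Queue a flush for one address, or do it "immediately" if not batching. */
static void need_flush_sketch(unsigned long vaddr)
{
	struct tlb_batch_sketch *batch = &the_batch;

	if (!batch->active) {
		printf("flushing %#lx immediately\n", vaddr);
		return;
	}
	batch->vaddrs[batch->index++] = vaddr;
	if (batch->index == TLB_BATCH_SIZE)
		flush_tlb_pending_sketch(batch);
}

static void leave_lazy_mmu_sketch(void)
{
	if (the_batch.index)
		flush_tlb_pending_sketch(&the_batch);
	the_batch.active = 0;
}

int main(void)
{
	enter_lazy_mmu_sketch();
	for (unsigned long va = 0x1000; va < 0x5000; va += 0x1000)
		need_flush_sketch(va);
	leave_lazy_mmu_sketch();
	return 0;
}
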
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
gen7_renderclear.c
234 gen7_emit_state_base_address(struct batch_chunk *batch, in gen7_emit_state_base_address() argument
237 u32 *cs = batch_alloc_items(batch, 0, 10); in gen7_emit_state_base_address()
241 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
243 *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
245 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
247 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
249 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
256 batch_advance(batch, cs); in gen7_emit_state_base_address()
260 gen7_emit_vfe_state(struct batch_chunk *batch, in gen7_emit_vfe_state() argument
266 u32 *cs = batch_alloc_items(batch, 3 in gen7_emit_vfe_state()
289 gen7_emit_interface_descriptor_load(struct batch_chunk *batch, const u32 interface_descriptor, unsigned int count) gen7_emit_interface_descriptor_load() argument
308 gen7_emit_media_object(struct batch_chunk *batch, unsigned int media_object_index) gen7_emit_media_object() argument
339 gen7_emit_pipeline_flush(struct batch_chunk *batch) gen7_emit_pipeline_flush() argument
354 gen7_emit_pipeline_invalidate(struct batch_chunk *batch) gen7_emit_pipeline_invalidate() argument
434 u32 *batch; gen7_setup_clear_gpr_bb() local
[all...]
selftest_hangcheck.c
53 u32 *batch; member
96 h->batch = vaddr; in hang_init()
143 u32 *batch; in hang_create_request() local
163 h->batch = vaddr; in hang_create_request()
201 batch = h->batch; in hang_create_request()
203 *batch++ = MI_STORE_DWORD_IMM_GEN4; in hang_create_request()
204 *batch++ = lower_32_bits(hws_address(hws, rq)); in hang_create_request()
205 *batch++ = upper_32_bits(hws_address(hws, rq)); in hang_create_request()
206 *batch in hang_create_request()
[all...]
intel_engine.h
226 static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) in __gen8_emit_pipe_control() argument
228 memset(batch, 0, 6 * sizeof(u32)); in __gen8_emit_pipe_control()
230 batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0; in __gen8_emit_pipe_control()
231 batch[1] = flags1; in __gen8_emit_pipe_control()
232 batch[2] = offset; in __gen8_emit_pipe_control()
234 return batch + 6; in __gen8_emit_pipe_control()
237 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) in gen8_emit_pipe_control() argument
239 return __gen8_emit_pipe_control(batch, 0, flags, offset); in gen8_emit_pipe_control()
242 static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) in gen12_emit_pipe_control() argument
244 return __gen8_emit_pipe_control(batch, flags in gen12_emit_pipe_control()
[all...]
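
The __gen8_emit_pipe_control() hits illustrate a common batch-buffer emission idiom in i915: a helper writes a fixed-length command into the batch and returns the advanced pointer so emits can be chained. The sketch below reproduces only that shape; the opcode macro and flag values are invented, and this is not the i915 implementation.

/*
 * Sketch of the "emit fixed-length command, return advanced pointer" idiom
 * (invented opcode and flag values).
 */
#include <stdint.h>
#include <string.h>

#define FAKE_OP_PIPE_CONTROL(len) (0x7A000000u | ((len) - 2))   /* invented encoding */

static inline uint32_t *emit_pipe_control_sketch(uint32_t *batch, uint32_t flags0,
						 uint32_t flags1, uint32_t offset)
{
	memset(batch, 0, 6 * sizeof(uint32_t));   /* the command is always 6 dwords */

	batch[0] = FAKE_OP_PIPE_CONTROL(6) | flags0;
	batch[1] = flags1;
	batch[2] = offset;

	return batch + 6;                         /* caller keeps emitting from here */
}

/* Usage: chain emits and report how many dwords were written. */
static unsigned int build_batch_sketch(uint32_t *buf)
{
	uint32_t *cs = buf;

	cs = emit_pipe_control_sketch(cs, 0, 0x1 /* pretend: flush */, 0x100);
	cs = emit_pipe_control_sketch(cs, 0, 0x2 /* pretend: invalidate */, 0x100);

	return (unsigned int)(cs - buf);
}
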
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/
gen7_renderclear.c
235 gen7_emit_state_base_address(struct batch_chunk *batch, in gen7_emit_state_base_address() argument
238 u32 *cs = batch_alloc_items(batch, 0, 10); in gen7_emit_state_base_address()
242 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
244 *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
246 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
248 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
250 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; in gen7_emit_state_base_address()
257 batch_advance(batch, cs); in gen7_emit_state_base_address()
261 gen7_emit_vfe_state(struct batch_chunk *batch, in gen7_emit_vfe_state() argument
267 u32 *cs = batch_alloc_items(batch, 3 in gen7_emit_vfe_state()
290 gen7_emit_interface_descriptor_load(struct batch_chunk *batch, const u32 interface_descriptor, unsigned int count) gen7_emit_interface_descriptor_load() argument
309 gen7_emit_media_object(struct batch_chunk *batch, unsigned int media_object_index) gen7_emit_media_object() argument
340 gen7_emit_pipeline_flush(struct batch_chunk *batch) gen7_emit_pipeline_flush() argument
355 gen7_emit_pipeline_invalidate(struct batch_chunk *batch) gen7_emit_pipeline_invalidate() argument
435 u32 *batch; gen7_setup_clear_gpr_bb() local
[all...]
gen8_engine_cs.h
53 __gen8_emit_pipe_control(u32 *batch, u32 bit_group_0, in __gen8_emit_pipe_control() argument
56 memset(batch, 0, 6 * sizeof(u32)); in __gen8_emit_pipe_control()
58 batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0; in __gen8_emit_pipe_control()
59 batch[1] = bit_group_1; in __gen8_emit_pipe_control()
60 batch[2] = offset; in __gen8_emit_pipe_control()
62 return batch + 6; in __gen8_emit_pipe_control()
65 static inline u32 *gen8_emit_pipe_control(u32 *batch, in gen8_emit_pipe_control() argument
68 return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset); in gen8_emit_pipe_control()
71 static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0, in gen12_emit_pipe_control() argument
74 return __gen8_emit_pipe_control(batch, bit_group_ in gen12_emit_pipe_control()
[all...]
intel_lrc.c
99 /* Close the batch; used mainly by live_lrc_layout() */ in set_offsets()
934 * A context is actually a big batch buffer with several in __lrc_init_regs()
1346 * batch buffer to ensure the value takes effect properly. All other bits
1562 * but there is a slight complication as this is applied in WA batch where the
1568 * it for a short period and this batch in non-premptible. We can ofcourse
1576 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) in gen8_emit_flush_coherentl3_wa() argument
1579 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; in gen8_emit_flush_coherentl3_wa()
1580 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); in gen8_emit_flush_coherentl3_wa()
1581 *batch++ = intel_gt_scratch_offset(engine->gt, in gen8_emit_flush_coherentl3_wa()
1583 *batch in gen8_emit_flush_coherentl3_wa()
1618 gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) gen8_init_indirectctx_bb() argument
1656 emit_lri(u32 *batch, const struct lri *lri, unsigned int count) emit_lri() argument
1670 gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) gen9_init_indirectctx_bb() argument
1783 void *batch, *batch_ptr; lrc_init_wa_ctx() local
[all...]
selftest_hangcheck.c
38 u32 *batch; member
81 h->batch = vaddr; in hang_init()
113 u32 *batch; in hang_create_request() local
133 h->batch = vaddr; in hang_create_request()
171 batch = h->batch; in hang_create_request()
173 *batch++ = MI_STORE_DWORD_IMM_GEN4; in hang_create_request()
174 *batch++ = lower_32_bits(hws_address(hws, rq)); in hang_create_request()
175 *batch++ = upper_32_bits(hws_address(hws, rq)); in hang_create_request()
176 *batch in hang_create_request()
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/
igt_gem_utils.c
114 struct i915_vma *batch; in igt_gpu_fill_dw() local
121 batch = igt_emit_store_dw(vma, offset, count, val); in igt_gpu_fill_dw()
122 if (IS_ERR(batch)) in igt_gpu_fill_dw()
123 return PTR_ERR(batch); in igt_gpu_fill_dw()
131 i915_vma_lock(batch); in igt_gpu_fill_dw()
132 err = i915_request_await_object(rq, batch->obj, false); in igt_gpu_fill_dw()
134 err = i915_vma_move_to_active(batch, rq, 0); in igt_gpu_fill_dw()
135 i915_vma_unlock(batch); in igt_gpu_fill_dw()
152 batch->node.start, batch in igt_gpu_fill_dw()
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/selftests/
igt_gem_utils.c
116 struct i915_vma *batch; in igt_gpu_fill_dw() local
123 batch = igt_emit_store_dw(vma, offset, count, val); in igt_gpu_fill_dw()
124 if (IS_ERR(batch)) in igt_gpu_fill_dw()
125 return PTR_ERR(batch); in igt_gpu_fill_dw()
133 err = igt_vma_move_to_active_unlocked(batch, rq, 0); in igt_gpu_fill_dw()
146 i915_vma_offset(batch), in igt_gpu_fill_dw()
147 i915_vma_size(batch), in igt_gpu_fill_dw()
155 i915_vma_unpin_and_release(&batch, 0); in igt_gpu_fill_dw()
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/
vmwgfx_mob.c
236 struct vmw_otable_batch *batch) in vmw_otable_batch_setup()
240 struct vmw_otable *otables = batch->otables; in vmw_otable_batch_setup()
245 for (i = 0; i < batch->num_otables; ++i) { in vmw_otable_batch_setup()
254 ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo); in vmw_otable_batch_setup()
259 for (i = 0; i < batch->num_otables; ++i) { in vmw_otable_batch_setup()
260 if (!batch->otables[i].enabled) in vmw_otable_batch_setup()
263 ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, in vmw_otable_batch_setup()
274 for (i = 0; i < batch->num_otables; ++i) { in vmw_otable_batch_setup()
275 if (batch->otables[i].enabled) in vmw_otable_batch_setup()
277 &batch in vmw_otable_batch_setup()
235 vmw_otable_batch_setup(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) vmw_otable_batch_setup() argument
327 vmw_otable_batch_takedown(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) vmw_otable_batch_takedown() argument
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/vmwgfx/
vmwgfx_mob.c
238 struct vmw_otable_batch *batch) in vmw_otable_batch_setup()
242 struct vmw_otable *otables = batch->otables; in vmw_otable_batch_setup()
247 for (i = 0; i < batch->num_otables; ++i) { in vmw_otable_batch_setup()
257 &batch->otable_bo); in vmw_otable_batch_setup()
262 for (i = 0; i < batch->num_otables; ++i) { in vmw_otable_batch_setup()
263 if (!batch->otables[i].enabled) in vmw_otable_batch_setup()
267 &batch->otable_bo->tbo, in vmw_otable_batch_setup()
278 for (i = 0; i < batch->num_otables; ++i) { in vmw_otable_batch_setup()
279 if (batch->otables[i].enabled) in vmw_otable_batch_setup()
281 &batch in vmw_otable_batch_setup()
237 vmw_otable_batch_setup(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) vmw_otable_batch_setup() argument
332 vmw_otable_batch_takedown(struct vmw_private *dev_priv, struct vmw_otable_batch *batch) vmw_otable_batch_takedown() argument
[all...]
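
vmw_otable_batch_setup() in both vmwgfx_mob.c hits walks the batch's otables, sets up each enabled one, and on failure tears down the entries already set up before returning. A generic setup-or-unwind sketch of that loop follows, with invented stand-ins for the vmwgfx calls:

/*
 * Generic setup-or-unwind loop over a batch of objects
 * (invented types and helpers, not the vmwgfx code).
 */
struct otable_sketch {
	int enabled;
	int bound;
};

static int setup_one_sketch(struct otable_sketch *ot, unsigned int i)
{
	if (i == 3)                      /* pretend entry 3 fails, to exercise the unwind */
		return -1;
	ot->bound = 1;
	return 0;
}

static void takedown_one_sketch(struct otable_sketch *ot)
{
	ot->bound = 0;
}

static int batch_setup_sketch(struct otable_sketch *otables, unsigned int num)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < num; i++) {
		if (!otables[i].enabled)
			continue;
		ret = setup_one_sketch(&otables[i], i);
		if (ret)
			goto out_no_setup;
	}
	return 0;

out_no_setup:
	/* Unwind only what was successfully set up before the failure. */
	while (i-- > 0) {
		if (otables[i].enabled && otables[i].bound)
			takedown_one_sketch(&otables[i]);
	}
	return ret;
}
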
/kernel/linux/linux-5.10/drivers/xen/
gntdev.c
789 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt, in gntdev_get_page() argument
797 ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page); in gntdev_get_page()
801 batch->pages[batch->nr_pages++] = page; in gntdev_get_page()
809 static void gntdev_put_pages(struct gntdev_copy_batch *batch) in gntdev_put_pages() argument
811 unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable); in gntdev_put_pages()
812 batch->nr_pages = 0; in gntdev_put_pages()
813 batch in gntdev_put_pages()
816 gntdev_copy(struct gntdev_copy_batch *batch) gntdev_copy() argument
848 gntdev_grant_copy_seg(struct gntdev_copy_batch *batch, struct gntdev_grant_copy_segment *seg, s16 __user *status) gntdev_grant_copy_seg() argument
946 struct gntdev_copy_batch batch; gntdev_ioctl_grant_copy() local
[all...]
/kernel/linux/linux-6.6/drivers/xen/
gntdev.c
799 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt, in gntdev_get_page() argument
807 ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page); in gntdev_get_page()
811 batch->pages[batch->nr_pages++] = page; in gntdev_get_page()
819 static void gntdev_put_pages(struct gntdev_copy_batch *batch) in gntdev_put_pages() argument
821 unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable); in gntdev_put_pages()
822 batch->nr_pages = 0; in gntdev_put_pages()
823 batch in gntdev_put_pages()
826 gntdev_copy(struct gntdev_copy_batch *batch) gntdev_copy() argument
858 gntdev_grant_copy_seg(struct gntdev_copy_batch *batch, struct gntdev_grant_copy_segment *seg, s16 __user *status) gntdev_grant_copy_seg() argument
956 struct gntdev_copy_batch batch; gntdev_ioctl_grant_copy() local
[all...]
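
The gntdev.c hits batch page pins: gntdev_get_page() pins each user page into batch->pages[], the accumulated grant-copy operations are submitted together, and gntdev_put_pages() unpins everything afterwards. The loose userspace analogue below keeps only that accumulate-then-release shape, using malloc/free in place of pin/unpin; all names with a _sketch suffix are invented.

/*
 * Accumulate-then-release page batch, loosely modelled on gntdev_copy_batch
 * (malloc/free stand in for pin/unpin; names invented).
 */
#include <stdlib.h>

#define COPY_BATCH_PAGES 16              /* illustrative; not gntdev's sizing */

struct copy_batch_sketch {
	void *pages[COPY_BATCH_PAGES];
	unsigned int nr_pages;
};

/* "Pin" one page into the batch; fails when the batch is full. */
static int batch_get_page_sketch(struct copy_batch_sketch *batch, size_t size)
{
	void *page;

	if (batch->nr_pages == COPY_BATCH_PAGES)
		return -1;               /* caller submits the batch and retries, as gntdev does */
	page = malloc(size);
	if (!page)
		return -1;
	batch->pages[batch->nr_pages++] = page;
	return 0;
}

/* Release every page the batch holds, mirroring gntdev_put_pages(). */
static void batch_put_pages_sketch(struct copy_batch_sketch *batch)
{
	for (unsigned int i = 0; i < batch->nr_pages; i++)
		free(batch->pages[i]);
	batch->nr_pages = 0;
}
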
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/
test_bpf_ma.c
42 static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch, in batch_alloc_free() argument
49 for (i = 0; i < batch; i++) { in batch_alloc_free()
68 for (i = 0; i < batch; i++) { in batch_alloc_free()
84 #define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
85 batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx)
106 /* Alloc 128 8-bytes objects in batch to trigger refilling, in test_bpf_mem_alloc_free()
107 * then free 128 8-bytes objects in batch to trigger freeing. in test_bpf_mem_alloc_free()
