
Searched refs:next (Results 151 - 175 of 6735) sorted by relevance


/kernel/linux/linux-6.6/sound/isa/gus/
gus_mem.c
47 nblock->next = pblock; in snd_gf1_mem_xalloc()
52 nblock->prev->next = nblock; in snd_gf1_mem_xalloc()
56 pblock = pblock->next; in snd_gf1_mem_xalloc()
58 nblock->next = NULL; in snd_gf1_mem_xalloc()
64 alloc->last->next = nblock; in snd_gf1_mem_xalloc()
78 alloc->first = block->next; in snd_gf1_mem_xfree()
79 if (block->next) in snd_gf1_mem_xfree()
80 block->next->prev = NULL; in snd_gf1_mem_xfree()
82 block->prev->next = block->next; in snd_gf1_mem_xfree()
[all...]
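
The gus_mem.c hits above are the classic doubly-linked list splice: snd_gf1_mem_xalloc() stitches a new block in before its successor (or appends at the tail), and snd_gf1_mem_xfree() unlinks by repointing both neighbours. A minimal stand-alone sketch of the same pattern, with hypothetical types in place of the driver's structures:

    struct block {
        struct block *prev, *next;
    };

    struct block_list {
        struct block *first, *last;
    };

    /* Insert nblock immediately before pblock; a NULL pblock appends at
     * the tail, mirroring the stitching in snd_gf1_mem_xalloc(). */
    static void insert_before(struct block_list *l, struct block *nblock,
                              struct block *pblock)
    {
        if (pblock) {
            nblock->prev = pblock->prev;
            nblock->next = pblock;
            if (pblock->prev)
                pblock->prev->next = nblock;
            else
                l->first = nblock;
            pblock->prev = nblock;
        } else {
            nblock->next = NULL;
            nblock->prev = l->last;
            if (l->last)
                l->last->next = nblock;   /* alloc->last->next = nblock */
            else
                l->first = nblock;
            l->last = nblock;
        }
    }

    /* Unlink a block, mirroring snd_gf1_mem_xfree(): each neighbour (or
     * the list head/tail) takes over the departing block's link. */
    static void unlink_block(struct block_list *l, struct block *block)
    {
        if (block->prev)
            block->prev->next = block->next;
        else
            l->first = block->next;
        if (block->next)
            block->next->prev = block->prev;
        else
            l->last = block->prev;
    }
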
/kernel/linux/linux-6.6/arch/hexagon/include/asm/
mmu_context.h
29 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, in switch_mm() argument
38 if (next->context.generation < prev->context.generation) { in switch_mm()
40 next->pgd[l1] = init_mm.pgd[l1]; in switch_mm()
42 next->context.generation = prev->context.generation; in switch_mm()
45 __vmnewmap((void *)next->context.ptbase); in switch_mm()
52 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) in activate_mm() argument
57 switch_mm(prev, next, current_thread_info()->task); in activate_mm()
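
The hexagon switch_mm() above lazily repairs the incoming mm: when next's generation is older than prev's, it recopies the kernel half of the pgd from init_mm before loading the new page-table base. A hedged sketch of that generation check with plain arrays (PGD_ENTRIES and KERNEL_START are made-up values, not hexagon's real layout):

    #define PGD_ENTRIES  256
    #define KERNEL_START 192    /* hypothetical first kernel-half index */

    struct mm_ctx { unsigned long generation; unsigned long ptbase; };
    struct mm     { unsigned long pgd[PGD_ENTRIES]; struct mm_ctx context; };

    static struct mm init_mm;   /* authoritative copy of kernel mappings */

    /* If next's pgd predates the latest kernel-mapping update, refresh
     * its kernel entries from init_mm and stamp it with the newer
     * generation; only then is the new base loaded (__vmnewmap()). */
    static void sync_and_switch(struct mm *prev, struct mm *next)
    {
        if (next->context.generation < prev->context.generation) {
            for (int l1 = KERNEL_START; l1 < PGD_ENTRIES; l1++)
                next->pgd[l1] = init_mm.pgd[l1];
            next->context.generation = prev->context.generation;
        }
        /* __vmnewmap((void *)next->context.ptbase) would run here */
    }
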
/kernel/linux/linux-6.6/lib/
llist.c
32 new_last->next = first; in llist_add_batch()
49 * llist_add) sequence in another user may change @head->first->next,
55 struct llist_node *entry, *next; in llist_del_first() local
61 next = READ_ONCE(entry->next); in llist_del_first()
62 } while (!try_cmpxchg(&head->first, &entry, next)); in llist_del_first()
81 head = head->next; in llist_reverse_order()
82 tmp->next = new_head; in llist_reverse_order()
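
lib/llist.c is a lock-free singly-linked stack: llist_add_batch() splices a chain in front of head->first, and llist_del_first() re-reads entry->next and retries the compare-and-swap until it wins. A user-space approximation with C11 atomics (the kernel's READ_ONCE()/try_cmpxchg() are replaced by stdatomic.h equivalents; the single-consumer caveat quoted above still applies):

    #include <stdatomic.h>
    #include <stddef.h>

    struct lnode { struct lnode *next; };
    struct lhead { _Atomic(struct lnode *) first; };

    /* Push a node, as llist_add() does: point it at the current first
     * node and retry if another thread moved the head meanwhile. */
    static void lpush(struct lhead *h, struct lnode *n)
    {
        struct lnode *first = atomic_load(&h->first);
        do {
            n->next = first;
        } while (!atomic_compare_exchange_weak(&h->first, &first, n));
    }

    /* Pop the first node, as llist_del_first() does. Per the comment in
     * the hit above, a concurrent pop+push pair can change entry->next
     * under us (ABA), so this is only safe with a single consumer. */
    static struct lnode *lpop(struct lhead *h)
    {
        struct lnode *entry = atomic_load(&h->first);
        struct lnode *next;

        do {
            if (!entry)
                return NULL;
            next = entry->next;
        } while (!atomic_compare_exchange_weak(&h->first, &entry, next));
        return entry;
    }
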
/kernel/linux/linux-6.6/block/
blk-merge.c
53 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
77 * - if 'pb' ends unaligned, the next bio must include in bio_will_gap()
82 bio_get_first_bvec(next, &nb); in bio_will_gap()
136 * If the next starting sector would be misaligned, stop the discard at in bio_split_discard()
674 struct request *next) in req_attempt_discard_merge()
680 if (blk_rq_sectors(req) + bio_sectors(next->bio) > in req_attempt_discard_merge()
684 req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next); in req_attempt_discard_merge()
692 struct request *next) in ll_merge_requests_fn()
696 if (req_gap_back_merge(req, next->bio)) in ll_merge_requests_fn()
702 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > in ll_merge_requests_fn()
52 bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) bio_will_gap() argument
673 req_attempt_discard_merge(struct request_queue *q, struct request *req, struct request *next) req_attempt_discard_merge() argument
691 ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next) ll_merge_requests_fn() argument
790 blk_try_req_merge(struct request *req, struct request *next) blk_try_req_merge() argument
805 attempt_merge(struct request_queue *q, struct request *req, struct request *next) attempt_merge() argument
890 struct request *next = elv_latter_request(q, rq); attempt_back_merge() local
914 blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next) blk_attempt_req_merge() argument
[all...]
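
The bio_will_gap() and req_gap_back_merge() hits implement the merge veto for devices with a virt_boundary constraint: two segments may share a DMA segment only if the first ends on the boundary and the second starts on it. The test reduces to mask arithmetic, sketched here with raw addresses instead of bvecs (segments_gap is a hypothetical name):

    #include <stdbool.h>

    /* Would joining these two segments leave a hole inside one boundary
     * window? True if the previous segment ends off-boundary or the
     * next one starts off-boundary - the bvec version of this is what
     * bio_will_gap() evaluates on prev's last bvec and next's first. */
    static bool segments_gap(unsigned long prev_end, unsigned long next_start,
                             unsigned long virt_boundary_mask)
    {
        if (!virt_boundary_mask)
            return false;    /* device imposes no virtual boundary */
        return ((prev_end | next_start) & virt_boundary_mask) != 0;
    }

ll_merge_requests_fn() layers a size check on top of the gap check: the merged request must also stay within the queue's sector limit, which is the blk_rq_sectors(req) + blk_rq_sectors(next) comparison visible above.
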
/kernel/linux/linux-5.10/arch/s390/mm/
pageattr.c
157 unsigned long next; in walk_pmd_level() local
165 next = pmd_addr_end(addr, end); in walk_pmd_level()
167 if (addr & ~PMD_MASK || addr + PMD_SIZE > next) { in walk_pmd_level()
175 rc = walk_pte_level(pmdp, addr, next, flags); in walk_pmd_level()
180 addr = next; in walk_pmd_level()
234 unsigned long next; in walk_pud_level() local
242 next = pud_addr_end(addr, end); in walk_pud_level()
244 if (addr & ~PUD_MASK || addr + PUD_SIZE > next) { in walk_pud_level()
252 rc = walk_pmd_level(pudp, addr, next, flags); in walk_pud_level()
255 addr = next; in walk_pud_level()
264 unsigned long next; walk_p4d_level() local
286 unsigned long next; change_page_attr() local
[all...]
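
walk_pmd_level() and walk_pud_level() both follow the pXd_addr_end() idiom: each iteration clamps next to the earlier of the region end and the next table-entry boundary, operates on [addr, next), then advances. The skeleton, with a generic helper standing in for pmd_addr_end()/pud_addr_end():

    /* Next size-aligned boundary after addr, clamped to end (size must
     * be a power of two) - the pmd_addr_end() idea in isolation. */
    static unsigned long addr_end(unsigned long addr, unsigned long end,
                                  unsigned long size)
    {
        unsigned long boundary = (addr + size) & ~(size - 1);
        return boundary < end ? boundary : end;
    }

    /* Walk [addr, end) one entry at a time, as the s390 walkers above
     * do; visit() descends a level or operates on [addr, next). */
    static void walk_level(unsigned long addr, unsigned long end,
                           unsigned long size,
                           void (*visit)(unsigned long addr, unsigned long next))
    {
        while (addr < end) {
            unsigned long next = addr_end(addr, end, size);
            visit(addr, next);
            addr = next;
        }
    }
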
vmem.c
79 * from unused_pmd_start to next PMD_SIZE boundary.
211 unsigned long next, prot, pages = 0; in modify_pmd_table() local
221 for (; addr < end; addr = next, pmd++) { in modify_pmd_table()
222 next = pmd_addr_end(addr, end); in modify_pmd_table()
228 IS_ALIGNED(next, PMD_SIZE)) { in modify_pmd_table()
233 } else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) { in modify_pmd_table()
241 IS_ALIGNED(next, PMD_SIZE) && in modify_pmd_table()
261 !IS_ALIGNED(next, PMD_SIZE)) { in modify_pmd_table()
262 vmemmap_use_new_sub_pmd(addr, next); in modify_pmd_table()
273 vmemmap_use_sub_pmd(addr, next); in modify_pmd_table()
313 unsigned long next, prot, pages = 0; modify_pud_table() local
390 unsigned long next; modify_p4d_table() local
444 unsigned long addr, next; modify_pagetable() local
[all...]
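
modify_pmd_table() adds one more decision on top of that walk: map a whole PMD as a single large page only when both addr and next fall on PMD_SIZE boundaries, otherwise drop to PTE granularity. The alignment test in isolation (2 MiB assumed for PMD_SIZE):

    #include <stdbool.h>

    #define PMD_SIZE (1UL << 21)    /* 2 MiB, the usual 64-bit value */

    /* A block mapping is legal only if it covers exactly one full PMD:
     * the IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(next, PMD_SIZE)
     * test seen in the modify_pmd_table() hit above. */
    static bool can_map_huge_pmd(unsigned long addr, unsigned long next)
    {
        return ((addr | next) & (PMD_SIZE - 1)) == 0;
    }
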
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/
sparsebit.c
224 * If current node has a right child, next node is the left-most in node_next()
235 * That parent is then the next node. in node_next()
252 * If current node has a left child, next node is the right-most in node_prev()
263 * That parent is then the next node. in node_prev()
559 * reductions are possible with the new previous and next nodes. Note,
605 struct node *prev, *next, *tmp; in node_reduce() local
614 * for the next pass through the reduction loop, in node_reduce()
617 * by first remembering the location of the next in node_reduce()
620 * no other nodes between prev and next. in node_reduce()
623 * both prev and next bot in node_reduce()
1374 struct node *nodep, *next; sparsebit_set_num() local
1456 struct node *nodep, *next; sparsebit_clear_num() local
1918 sparsebit_idx_t next; operate() local
[all...]
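
The node_next()/node_prev() comments describe textbook in-order traversal of a binary tree with parent pointers: the successor is the right child's left-most descendant, else the first ancestor reached from a left child. As code (struct node reduced to its links):

    #include <stddef.h>

    struct node { struct node *parent, *left, *right; };

    /* In-order successor, exactly as the node_next() comment above puts
     * it: with a right child, it's that subtree's left-most node;
     * without one, climb until we arrive from a left child - that
     * parent is next (NULL once we run off the root). node_prev() is
     * the mirror image. */
    static struct node *node_next(struct node *np)
    {
        if (np->right) {
            np = np->right;
            while (np->left)
                np = np->left;
            return np;
        }
        while (np->parent && np == np->parent->right)
            np = np->parent;
        return np->parent;
    }
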
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/
sparsebit.c
224 * If current node has a right child, next node is the left-most in node_next()
235 * That parent is then the next node. in node_next()
252 * If current node has a left child, next node is the right-most in node_prev()
263 * That parent is then the next node. in node_prev()
559 * reductions are possible with the new previous and next nodes. Note,
605 struct node *prev, *next, *tmp; in node_reduce() local
614 * for the next pass through the reduction loop, in node_reduce()
617 * by first remembering the location of the next in node_reduce()
620 * no other nodes between prev and next. in node_reduce()
623 * both prev and next bot in node_reduce()
1373 struct node *nodep, *next; sparsebit_set_num() local
1455 struct node *nodep, *next; sparsebit_clear_num() local
1916 sparsebit_idx_t next; operate() local
[all...]
/kernel/linux/linux-6.6/arch/arm64/kernel/
process.c
426 static void tls_thread_switch(struct task_struct *next) in tls_thread_switch() argument
430 if (is_compat_thread(task_thread_info(next))) in tls_thread_switch()
431 write_sysreg(next->thread.uw.tp_value, tpidrro_el0); in tls_thread_switch()
435 write_sysreg(*task_user_tls(next), tpidr_el0); in tls_thread_switch()
437 write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); in tls_thread_switch()
444 static void ssbs_thread_switch(struct task_struct *next) in ssbs_thread_switch() argument
450 if (unlikely(next->flags & PF_KTHREAD)) in ssbs_thread_switch()
460 spectre_v4_enable_task_mitigation(next); in ssbs_thread_switch()
472 static void entry_task_switch(struct task_struct *next) in entry_task_switch() argument
474 __this_cpu_write(__entry_task, next); in entry_task_switch()
482 erratum_1418040_thread_switch(struct task_struct *next) erratum_1418040_thread_switch() argument
523 __switch_to(struct task_struct *prev, struct task_struct *next) __switch_to() argument
[all...]
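
arm64's __switch_to() is deliberately a flat sequence of single-purpose helpers - tls_thread_switch(), ssbs_thread_switch(), entry_task_switch(), the erratum workaround - each moving one piece of per-task state before the final register-level switch. A structural sketch with fake state standing in for the system registers:

    struct task {
        unsigned long tls;    /* stands in for thread.uw.tp_value / tpidr_el0 */
        int is_kthread;       /* stands in for PF_KTHREAD */
    };

    static unsigned long fake_tpidr_el0;  /* stands in for the real sysreg */
    static struct task *entry_task;       /* stands in for per-CPU __entry_task */

    static void tls_thread_switch(struct task *next)
    {
        fake_tpidr_el0 = next->tls;       /* write_sysreg(..., tpidr_el0) */
    }

    static void ssbs_thread_switch(struct task *next)
    {
        if (next->is_kthread)
            return;                       /* kernel threads keep the kernel policy */
        /* spectre_v4_enable_task_mitigation(next) would run here */
    }

    static void entry_task_switch(struct task *next)
    {
        entry_task = next;                /* __this_cpu_write(__entry_task, next) */
    }

    /* The shape of __switch_to(): per-feature helpers first, then the
     * register-level switch (cpu_switch_to() in the real kernel). */
    static struct task *switch_to(struct task *prev, struct task *next)
    {
        tls_thread_switch(next);
        ssbs_thread_switch(next);
        entry_task_switch(next);
        return prev;
    }
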
/kernel/linux/linux-5.10/arch/ia64/include/asm/
mmu_context.h
38 unsigned int next; /* next context number to use */ member
41 /* call wrap_mmu_context when next >= max */
93 if (ia64_ctx.next >= ia64_ctx.limit) { in get_mmu_context()
94 ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, in get_mmu_context()
95 ia64_ctx.max_ctx, ia64_ctx.next); in get_mmu_context()
97 ia64_ctx.max_ctx, ia64_ctx.next); in get_mmu_context()
98 if (ia64_ctx.next >= ia64_ctx.max_ctx) in get_mmu_context()
101 mm->context = context = ia64_ctx.next++; in get_mmu_context()
187 activate_mm (struct mm_struct *prev, struct mm_struct *next) in activate_mm() argument
[all...]
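
get_mmu_context() allocates context numbers from a moving cursor: scan the bitmap for the next free ID starting at ia64_ctx.next, and wrap (flushing stale contexts) once the cursor passes max_ctx. The cursor-plus-wrap scheme with a plain bool array in place of find_next_zero_bit():

    #include <stdbool.h>

    #define MAX_CTX 1024    /* hypothetical; ia64 derives this from hardware */

    static bool in_use[MAX_CTX];
    static unsigned int next_ctx = 1;   /* context 0 stays reserved */

    /* Return a free context number, wrapping the cursor once; returns 0
     * on exhaustion, where the real code would call wrap_mmu_context()
     * to flush and recycle old contexts instead. */
    static unsigned int get_context(void)
    {
        for (int pass = 0; pass < 2; pass++) {
            while (next_ctx < MAX_CTX) {
                if (!in_use[next_ctx]) {
                    in_use[next_ctx] = true;
                    return next_ctx++;
                }
                next_ctx++;
            }
            next_ctx = 1;   /* the wrap: start scanning from 1 again */
        }
        return 0;
    }
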
/kernel/linux/linux-6.6/arch/ia64/include/asm/
mmu_context.h
38 unsigned int next; /* next context number to use */ member
41 /* call wrap_mmu_context when next >= max */
88 if (ia64_ctx.next >= ia64_ctx.limit) { in get_mmu_context()
89 ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, in get_mmu_context()
90 ia64_ctx.max_ctx, ia64_ctx.next); in get_mmu_context()
92 ia64_ctx.max_ctx, ia64_ctx.next); in get_mmu_context()
93 if (ia64_ctx.next >= ia64_ctx.max_ctx) in get_mmu_context()
96 mm->context = context = ia64_ctx.next++; in get_mmu_context()
179 activate_mm (struct mm_struct *prev, struct mm_struct *next) in activate_mm() argument
[all...]
/kernel/linux/linux-6.6/arch/s390/include/asm/
mmu_context.h
73 static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, in switch_mm_irqs_off() argument
78 if (next == &init_mm) in switch_mm_irqs_off()
81 S390_lowcore.user_asce = next->context.asce; in switch_mm_irqs_off()
82 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); in switch_mm_irqs_off()
85 if (prev != next) in switch_mm_irqs_off()
90 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, in switch_mm() argument
96 switch_mm_irqs_off(prev, next, tsk); in switch_mm()
119 struct mm_struct *next) in activate_mm()
121 switch_mm(prev, next, current); in activate_mm()
122 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); in activate_mm()
118 activate_mm(struct mm_struct *prev, struct mm_struct *next) activate_mm() argument
[all...]
/kernel/linux/linux-5.10/include/linux/
list_nulls.h
26 struct hlist_nulls_node *next, **pprev; member
96 n->next = first; in hlist_nulls_add_head()
100 WRITE_ONCE(first->pprev, &n->next); in hlist_nulls_add_head()
105 struct hlist_nulls_node *next = n->next; in __hlist_nulls_del() local
108 WRITE_ONCE(*pprev, next); in __hlist_nulls_del()
109 if (!is_a_nulls(next)) in __hlist_nulls_del()
110 WRITE_ONCE(next->pprev, pprev); in __hlist_nulls_del()
131 pos = pos->next)
143 pos = pos->next)
[all...]
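
hlist_nulls lists end not in NULL but in a "nulls" marker - an odd pointer value encoding, say, a hash-bucket id - so a lockless reader that raced a move can detect that it fell off into the wrong chain. A sketch of the encoding plus the unlink from __hlist_nulls_del() (WRITE_ONCE dropped for brevity):

    #include <stdbool.h>
    #include <stdint.h>

    struct hnode { struct hnode *next, **pprev; };

    /* An odd pointer is a nulls marker, never a real node address. */
    static bool is_a_nulls(const struct hnode *p)
    {
        return (uintptr_t)p & 1;
    }

    static struct hnode *make_nulls(unsigned long value)
    {
        return (struct hnode *)((value << 1) | 1);
    }

    /* Unlink n, as in the __hlist_nulls_del() hit: the predecessor's
     * next slot (*pprev) takes n->next, and a real successor gets its
     * pprev repointed - a nulls marker has no pprev, hence the test. */
    static void hnulls_del(struct hnode *n)
    {
        struct hnode *next = n->next;
        struct hnode **pprev = n->pprev;

        *pprev = next;
        if (!is_a_nulls(next))
            next->pprev = pprev;
    }
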
pagewalk.h
39 unsigned long next, struct mm_walk *walk);
41 unsigned long next, struct mm_walk *walk);
43 unsigned long next, struct mm_walk *walk);
45 unsigned long next, struct mm_walk *walk);
47 unsigned long next, struct mm_walk *walk);
48 int (*pte_hole)(unsigned long addr, unsigned long next,
51 unsigned long addr, unsigned long next,
53 int (*test_walk)(unsigned long addr, unsigned long next,
65 /* Descend to next level, splitting huge pages if needed and possible */
67 /* Continue to next entr
[all...]
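
pagewalk.h is a callback table: one handler per page-table level, each invoked with the [addr, next) span that entry covers, plus pte_hole for absent ranges; a nonzero return aborts the walk. A one-level toy of the same shape (walk_ops, PMD_SPAN, and the presence test are all made up for illustration):

    struct walk_ops {
        int (*entry)(unsigned long addr, unsigned long next, void *priv);
        int (*hole)(unsigned long addr, unsigned long next, void *priv);
    };

    #define PMD_SPAN (1UL << 21)

    /* Toy presence test; the real walker reads the page-table entry. */
    static int entry_present(unsigned long addr)
    {
        return (addr >> 21) & 1;
    }

    static int walk_range(unsigned long addr, unsigned long end,
                          const struct walk_ops *ops, void *priv)
    {
        while (addr < end) {
            unsigned long next = (addr + PMD_SPAN) & ~(PMD_SPAN - 1);
            int (*cb)(unsigned long, unsigned long, void *);
            int ret;

            if (next > end)
                next = end;
            cb = entry_present(addr) ? ops->entry : ops->hole;
            ret = cb ? cb(addr, next, priv) : 0;
            if (ret)
                return ret;   /* nonzero return aborts, as in the kernel */
            addr = next;
        }
        return 0;
    }
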
/kernel/linux/linux-6.6/include/linux/
list_nulls.h
26 struct hlist_nulls_node *next, **pprev; member
96 n->next = first; in hlist_nulls_add_head()
100 WRITE_ONCE(first->pprev, &n->next); in hlist_nulls_add_head()
105 struct hlist_nulls_node *next = n->next; in __hlist_nulls_del() local
108 WRITE_ONCE(*pprev, next); in __hlist_nulls_del()
109 if (!is_a_nulls(next)) in __hlist_nulls_del()
110 WRITE_ONCE(next->pprev, pprev); in __hlist_nulls_del()
131 pos = pos->next)
143 pos = pos->next)
[all...]
pagewalk.h
60 unsigned long next, struct mm_walk *walk);
62 unsigned long next, struct mm_walk *walk);
64 unsigned long next, struct mm_walk *walk);
66 unsigned long next, struct mm_walk *walk);
68 unsigned long next, struct mm_walk *walk);
69 int (*pte_hole)(unsigned long addr, unsigned long next,
72 unsigned long addr, unsigned long next,
74 int (*test_walk)(unsigned long addr, unsigned long next,
87 /* Descend to next level, splitting huge pages if needed and possible */
89 /* Continue to next entr
[all...]
/kernel/liteos_m/components/exchook/
los_exchook.c
41 struct Node *next; member
50 DoExcHookInRegOrder(excType, node->next); in DoExcHookInRegOrder()
77 g_excNodes[i].next = g_excHeads[EXC_TYPE_END]; in GetFreeNode()
85 g_excHeads[EXC_TYPE_END] = node->next; in GetFreeNode()
105 node->next = g_excHeads[excType]; in LOS_RegExcHook()
121 for (node = g_excHeads[excType]; node != NULL; node = node->next) { in LOS_UnRegExcHook()
124 preNode->next = node->next; in LOS_UnRegExcHook()
126 g_excHeads[excType] = node->next; in LOS_UnRegExcHook()
129 node->next in LOS_UnRegExcHook()
[all...]
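
los_exchook.c avoids dynamic allocation entirely: a fixed node array is threaded into a free list through the same next field, parked on the spare g_excHeads[EXC_TYPE_END] slot, and registering a hook moves one node onto the head of its exception type's chain. The pool pattern in miniature (sizes hypothetical):

    #include <stddef.h>

    #define EXC_TYPES  4    /* hypothetical; LiteOS uses EXC_TYPE_END */
    #define NODE_COUNT 8

    struct Node { void (*hook)(void); struct Node *next; };

    static struct Node g_nodes[NODE_COUNT];
    static struct Node *g_heads[EXC_TYPES + 1];  /* last slot = free list */

    static void pool_init(void)
    {
        for (int i = 0; i < NODE_COUNT; i++) {
            g_nodes[i].next = g_heads[EXC_TYPES];
            g_heads[EXC_TYPES] = &g_nodes[i];
        }
    }

    /* GetFreeNode() analogue: pop one node off the free list. */
    static struct Node *get_free_node(void)
    {
        struct Node *node = g_heads[EXC_TYPES];
        if (node)
            g_heads[EXC_TYPES] = node->next;
        return node;
    }

    /* LOS_RegExcHook() analogue: push at the chain head; the real
     * DoExcHookInRegOrder() recurses to node->next first, so hooks
     * still fire in registration order despite the reversed list. */
    static int reg_hook(int excType, void (*hook)(void))
    {
        struct Node *node = get_free_node();
        if (!node)
            return -1;
        node->hook = hook;
        node->next = g_heads[excType];
        g_heads[excType] = node;
        return 0;
    }
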
/kernel/linux/linux-5.10/arch/x86/mm/
tlb.c
194 * Make sure the next time we go to switch to in clear_asid_other()
205 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, in choose_new_asid() argument
221 next->context.ctx_id) in choose_new_asid()
244 * until the next time we switch to it.
309 void switch_mm(struct mm_struct *prev, struct mm_struct *next, in switch_mm() argument
315 switch_mm_irqs_off(prev, next, tsk); in switch_mm()
319 static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next) in mm_mangle_tif_spec_ib() argument
321 unsigned long next_tif = task_thread_info(next)->flags; in mm_mangle_tif_spec_ib()
324 return (unsigned long)next->mm | ibpb; in mm_mangle_tif_spec_ib()
327 static void cond_ibpb(struct task_struct *next) in cond_ibpb() argument
422 switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) switch_mm_irqs_off() argument
[all...]
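
choose_new_asid() on x86 scans the CPU's small ASID table for a slot already bound to next's ctx_id; a hit is reused and needs a flush only if the slot's recorded TLB generation is stale, while a miss takes the next slot round-robin with a full flush. The policy, hedged, with plain arrays in place of the per-CPU tlb_state (NR_ASIDS is illustrative; x86 keeps only a handful of dynamic ASIDs per CPU):

    #include <stdbool.h>
    #include <stdint.h>

    #define NR_ASIDS 6

    struct mm { uint64_t ctx_id; };

    static struct { uint64_t ctx_id; uint64_t tlb_gen; } cpu_asid[NR_ASIDS];
    static unsigned int next_asid_cursor;

    static void choose_asid(const struct mm *next, uint64_t next_tlb_gen,
                            unsigned int *new_asid, bool *need_flush)
    {
        for (unsigned int asid = 0; asid < NR_ASIDS; asid++) {
            if (cpu_asid[asid].ctx_id != next->ctx_id)
                continue;
            *new_asid = asid;                       /* reuse the old slot */
            *need_flush = cpu_asid[asid].tlb_gen < next_tlb_gen;
            return;
        }
        *new_asid = next_asid_cursor++ % NR_ASIDS;  /* evict round-robin */
        *need_flush = true;                         /* unknown contents */
    }
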
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
ramnv50.c
97 unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40; in nv50_ram_timing_calc()
99 ram->base.next->bios.rammap_00_16_40) << 16 | in nv50_ram_timing_calc()
227 struct nvkm_ram_data *next; in nv50_ram_calc() local
235 next = &ram->base.target; in nv50_ram_calc()
236 next->freq = freq; in nv50_ram_calc()
237 ram->base.next = next; in nv50_ram_calc()
251 nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios); in nv50_ram_calc()
261 &next->bios); in nv50_ram_calc()
268 if (next in nv50_ram_calc()
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
ramnv50.c
97 unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40; in nv50_ram_timing_calc()
99 ram->base.next->bios.rammap_00_16_40) << 16 | in nv50_ram_timing_calc()
226 struct nvkm_ram_data *next; in nv50_ram_calc() local
234 next = &ram->base.target; in nv50_ram_calc()
235 next->freq = freq; in nv50_ram_calc()
236 ram->base.next = next; in nv50_ram_calc()
250 nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios); in nv50_ram_calc()
260 &next->bios); in nv50_ram_calc()
267 if (next in nv50_ram_calc()
[all...]
/kernel/linux/linux-5.10/arch/arm64/kernel/
process.c
452 static void tls_thread_switch(struct task_struct *next) in tls_thread_switch() argument
456 if (is_compat_thread(task_thread_info(next))) in tls_thread_switch()
457 write_sysreg(next->thread.uw.tp_value, tpidrro_el0); in tls_thread_switch()
461 write_sysreg(*task_user_tls(next), tpidr_el0); in tls_thread_switch()
464 /* Restore the UAO state depending on next's addr_limit */
465 void uao_thread_switch(struct task_struct *next) in uao_thread_switch() argument
468 if (task_thread_info(next)->addr_limit == KERNEL_DS) in uao_thread_switch()
479 static void ssbs_thread_switch(struct task_struct *next) in ssbs_thread_switch() argument
485 if (unlikely(next->flags & PF_KTHREAD)) in ssbs_thread_switch()
495 spectre_v4_enable_task_mitigation(next); in ssbs_thread_switch()
507 entry_task_switch(struct task_struct *next) entry_task_switch() argument
517 erratum_1418040_thread_switch(struct task_struct *next) erratum_1418040_thread_switch() argument
539 __switch_to(struct task_struct *prev, struct task_struct *next) __switch_to() argument
[all...]
/kernel/linux/linux-5.10/tools/usb/usbip/libsrc/
names.c
26 struct vendor *next; member
32 struct product *next; member
38 struct class *next; member
44 struct subclass *next; member
50 struct protocol *next; member
56 struct genericstrtable *next; member
88 for (; v; v = v->next) in names_vendor()
99 for (; p; p = p->next) in names_product()
110 for (; c; c = c->next) in names_class()
121 for (; s; s = s->next) in names_subclass()
143 struct pool *next; global() member
[all...]
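
names.c keeps each USB ID namespace in a small chained hash table: every struct carries its own next pointer, and lookups are the single-line bucket walks visible above (for (; v; v = v->next)). The pattern for the vendor table (HASHSZ and the hash are placeholders, not usbip's actual ones):

    #include <stddef.h>
    #include <stdint.h>

    #define HASHSZ 16

    struct vendor {
        struct vendor *next;
        uint16_t vendorid;
        const char *name;
    };

    static struct vendor *vendors[HASHSZ];

    static unsigned int hash_vendor(uint16_t id)
    {
        return id % HASHSZ;
    }

    /* Lookup, as in the names_vendor() hit: walk the bucket's chain. */
    static const char *names_vendor(uint16_t vendorid)
    {
        for (struct vendor *v = vendors[hash_vendor(vendorid)]; v; v = v->next)
            if (v->vendorid == vendorid)
                return v->name;
        return NULL;
    }

    /* Insert at the bucket head; later entries shadow earlier ones. */
    static void add_vendor(struct vendor *v)
    {
        unsigned int h = hash_vendor(v->vendorid);
        v->next = vendors[h];
        vendors[h] = v;
    }
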
/kernel/linux/linux-6.6/tools/usb/usbip/libsrc/
names.c
26 struct vendor *next; member
32 struct product *next; member
38 struct class *next; member
44 struct subclass *next; member
50 struct protocol *next; member
56 struct genericstrtable *next; member
88 for (; v; v = v->next) in names_vendor()
99 for (; p; p = p->next) in names_product()
110 for (; c; c = c->next) in names_class()
121 for (; s; s = s->next) in names_subclass()
143 struct pool *next; global() member
[all...]
/kernel/linux/linux-5.10/arch/arm/include/asm/
mmu_context.h
105 #define activate_mm(prev,next) switch_mm(prev, next, NULL)
128 switch_mm(struct mm_struct *prev, struct mm_struct *next, in switch_mm() argument
140 !cpumask_empty(mm_cpumask(next)) && in switch_mm()
141 !cpumask_test_cpu(cpu, mm_cpumask(next))) in switch_mm()
144 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { in switch_mm()
145 check_and_switch_context(next, tsk); in switch_mm()
/kernel/linux/linux-5.10/fs/orangefs/
dir.c
11 struct orangefs_dir_part *next; member
141 if (part->next) in parse_readdir()
142 part = part->next; in parse_readdir()
148 new->next = NULL; in parse_readdir()
154 part->next = new; in parse_readdir()
205 * should find the next entry. in fill_from_part()
221 goto next; in fill_from_part()
224 goto next; in fill_from_part()
235 next: in fill_from_part()
251 while (part->next in orangefs_dir_fill()
293 struct orangefs_dir_part *next = part->next; orangefs_dir_llseek() local
390 struct orangefs_dir_part *next = part->next; orangefs_dir_release() local
[all...]
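
orangefs chains directory buffers as parts: parse_readdir() walks part->next to the tail to append, and orangefs_dir_release() tears the chain down with the standard save-next-before-free idiom, since part is gone the moment it is freed. Both halves in a few lines:

    #include <stdlib.h>

    struct part { struct part *next; /* directory payload omitted */ };

    /* Append, as parse_readdir() does: walk to the tail, then link. */
    static void append_part(struct part **head, struct part *new)
    {
        struct part *part = *head;

        new->next = NULL;
        if (!part) {
            *head = new;
            return;
        }
        while (part->next)
            part = part->next;
        part->next = new;
    }

    /* Tear down, as orangefs_dir_release() does: remember next first,
     * because part cannot be dereferenced once freed. */
    static void free_parts(struct part *part)
    {
        while (part) {
            struct part *next = part->next;
            free(part);
            part = next;
        }
    }
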

