Searched refs:next (Results 6326 - 6350 of 12432) sorted by relevance

/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/
test_cls_redirect.c
289 uint8_t next; in pkt_skip_ipv6_extension_headers() member
292 .next = ipv6->nexthdr, in pkt_skip_ipv6_extension_headers()
298 switch (exthdr.next) { in pkt_skip_ipv6_extension_headers()
318 /* Decode next header */ in pkt_skip_ipv6_extension_headers()
322 /* The next header is not one of the known extension in pkt_skip_ipv6_extension_headers()
332 *upper_proto = exthdr.next; in pkt_skip_ipv6_extension_headers()
570 /* Get the next hop from the GLB header.
583 /* Skip "used" next hops. */ in get_next_hop()
587 /* No more next hops, we are at the end of the GLB header. */ in get_next_hop()
596 /* Skip the remainig next hop in get_next_hop()
[all...]
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/
test_lru_map.c
135 int next = *next_to_try; in sched_next_online() local
138 while (next < nr_cpus) { in sched_next_online()
140 CPU_SET(next++, &cpuset); in sched_next_online()
147 *next_to_try = next; in sched_next_online()
403 * gone during the next LRU shrink. in test_lru_sanity2()
750 * this will be evicted on next update. in test_lru_sanity7()
839 * this will be evicted on next update. in test_lru_sanity8()
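
The sched_next_online() hits above walk CPU ids and pin the test thread to the first CPU that accepts an affinity mask. Below is a minimal userspace sketch of that approach; the helper name pin_to_next_cpu() and the surrounding main() are invented for illustration and are not taken from the selftest.

    /*
     * Hedged sketch, not selftest code: pin the calling thread to the next
     * CPU that accepts an affinity mask, in the spirit of sched_next_online().
     */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    static int pin_to_next_cpu(int *next_to_try)
    {
        int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        int next = *next_to_try;
        int ret = -1;

        while (next < nr_cpus) {
            cpu_set_t cpuset;

            CPU_ZERO(&cpuset);
            CPU_SET(next++, &cpuset);
            /* sched_setaffinity() fails for an offline CPU; try the next one. */
            if (!sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
                ret = 0;
                break;
            }
        }

        *next_to_try = next;
        return ret;
    }

    int main(void)
    {
        int next_to_try = 0;

        if (!pin_to_next_cpu(&next_to_try))
            printf("pinned to CPU %d\n", next_to_try - 1);
        return 0;
    }
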
/kernel/linux/linux-5.10/drivers/lightnvm/
pblk-recovery.c
765 goto next; in pblk_recov_l2p()
770 goto next; in pblk_recov_l2p()
775 goto next; in pblk_recov_l2p()
786 next: in pblk_recov_l2p()
831 /* Allocate next line for preparation */ in pblk_recov_l2p()
/kernel/linux/linux-5.10/drivers/gpu/drm/omapdrm/
omap_crtc.c
301 * the next vblank interrupt. in omap_crtc_vblank_irq()
366 struct omap_dss_device *dssdev = omap_crtc->pipe->output->next; in omap_crtc_manual_display_update()
505 if (omap_crtc->pipe->output->next == NULL || in omap_crtc_mode_valid()
506 omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) { in omap_crtc_mode_valid()
558 struct omap_dss_device *display = omap_crtc->pipe->output->next; in omap_crtc_is_manually_updated()
/kernel/linux/linux-5.10/drivers/infiniband/core/
uverbs_ioctl.c
39 struct bundle_alloc_head *next; member
123 buf->next = pbundle->allocated_mem; in _uverbs_alloc()
537 memblock = memblock->next; in bundle_destroy()
572 pbundle->alloc_head.next = NULL; in ib_uverbs_cmd_verbs()
/kernel/linux/linux-5.10/drivers/net/ethernet/sun/
sunqe.c
484 goto next; in qec_interrupt()
501 next: in qec_interrupt()
983 struct sunqec *next = root_qec_dev->next_module; in qec_exit() local
991 root_qec_dev = next; in qec_exit()
/kernel/linux/linux-5.10/drivers/net/ipvlan/
ipvlan_main.c
614 /* Increment id-base to the next slot for the future assignment */ in ipvlan_link_new()
651 struct ipvl_addr *addr, *next; in ipvlan_link_delete() local
654 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { in ipvlan_link_delete()
723 struct ipvl_dev *ipvlan, *next; in ipvlan_device_event() local
757 list_for_each_entry_safe(ipvlan, next, &port->ipvlans, pnode) in ipvlan_device_event()
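
ipvlan_link_delete() and ipvlan_device_event() above, like several other hits on this page (__ap_flush_queue, destroy_tty_buffer_list, dsa_tree_teardown), rely on list_for_each_entry_safe(), whose extra next cursor caches the following node so the current one can be unlinked and freed mid-walk. The sketch below shows the same idea on a plain singly linked list; struct item and free_all() are invented, and it uses libc rather than the kernel list macros.

    /*
     * Hedged sketch: remember the next node before freeing the current
     * one, which is the point of the list_for_each_entry_safe() hits above.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        int val;
        struct item *next;
    };

    static void free_all(struct item **head)
    {
        struct item *cur = *head;

        while (cur) {
            /* Cache the successor first; cur is gone after free(). */
            struct item *next = cur->next;

            printf("dropping %d\n", cur->val);
            free(cur);
            cur = next;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct item *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct item *it = malloc(sizeof(*it));

            if (!it)
                break;
            it->val = i;
            it->next = head;
            head = it;
        }
        free_all(&head);
        return 0;
    }
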
/kernel/linux/linux-5.10/drivers/s390/char/
con3215.c
73 struct raw3215_req *next; /* pointer to next request */ member
116 raw3215_freelist = req->next; in raw3215_alloc_req()
132 req->next = raw3215_freelist; in raw3215_free_req()
350 * Try to start the next IO and wake up processes waiting on the tty.
925 req->next = raw3215_freelist; in con3215_init()
/kernel/linux/linux-5.10/drivers/s390/crypto/
ap_queue.c
231 /* Start the next request on the queue. */ in ap_sm_write()
232 ap_msg = list_entry(aq->requestq.next, struct ap_message, list); in ap_sm_write()
842 struct ap_message *ap_msg, *next; in __ap_flush_queue() local
844 list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) { in __ap_flush_queue()
850 list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) { in __ap_flush_queue()
/kernel/linux/linux-5.10/drivers/perf/
qcom_l2_pmu.c
136 struct list_head next; member
755 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
865 INIT_LIST_HEAD(&cluster->next); in l2_cache_pmu_probe_cluster()
866 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
/kernel/linux/linux-5.10/drivers/tty/hvc/
hvc_iucv.c
191 struct iucv_tty_buffer *ent, *next; in destroy_tty_buffer_list() local
193 list_for_each_entry_safe(ent, next, list, list) { in destroy_tty_buffer_list()
955 struct iucv_tty_buffer *ent, *next; in hvc_iucv_msg_complete() local
959 list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list) in hvc_iucv_msg_complete()
/kernel/linux/linux-5.10/net/rds/
recv.c
298 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u " in rds_recv_incoming()
317 * to assume that the next valid seq does not come after a in rds_recv_incoming()
410 *inc = list_entry(rs->rs_recv_queue.next, in rds_next_incoming()
476 notifier = list_entry(rs->rs_notify_queue.next, in rds_notify_queue_get()
487 notifier = list_entry(copy.next, struct rds_notifier, n_list); in rds_notify_queue_get()
614 info = list_entry(q->zcookie_head.next, in rds_recvmsg_zcookie()
699 * to get the next message. in rds_recvmsg()
/kernel/linux/linux-5.10/net/sched/
sch_fq_codel.c
120 flow->head = skb->next; in dequeue_head()
132 flow->tail->next = skb; in flow_queue_add()
134 skb->next = NULL; in flow_queue_add()
318 * or HTB crashes. Defer it for next round. in fq_codel_dequeue()
653 skb = skb->next; in fq_codel_dump_class_stats()
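
The dequeue_head() and flow_queue_add() hits above show fq_codel's per-flow packet queue: a singly linked list with head and tail pointers, popped at the head and appended at the tail through skb->next. Below is a small sketch of that queue shape, with a made-up struct pkt standing in for sk_buff.

    /* Hedged sketch of the head/tail queue seen in dequeue_head() and
     * flow_queue_add() above; struct pkt is invented for illustration. */
    #include <assert.h>
    #include <stddef.h>

    struct pkt {
        struct pkt *next;
    };

    struct flow {
        struct pkt *head;
        struct pkt *tail;
    };

    /* Append at the tail, as flow_queue_add() does. */
    static void flow_enqueue(struct flow *f, struct pkt *p)
    {
        p->next = NULL;
        if (!f->head)
            f->head = p;
        else
            f->tail->next = p;
        f->tail = p;
    }

    /* Pop from the head, as dequeue_head() does. */
    static struct pkt *flow_dequeue(struct flow *f)
    {
        struct pkt *p = f->head;

        if (p) {
            f->head = p->next;
            p->next = NULL;
        }
        return p;
    }

    int main(void)
    {
        struct flow f = { 0 };
        struct pkt a, b;

        flow_enqueue(&f, &a);
        flow_enqueue(&f, &b);
        assert(flow_dequeue(&f) == &a);
        assert(flow_dequeue(&f) == &b);
        assert(flow_dequeue(&f) == NULL);
        return 0;
    }
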
/kernel/linux/linux-5.10/tools/perf/ui/stdio/
hist.c
113 struct rb_node *node, *next; in __callchain__fprintf_graph() local
142 next = rb_next(node); in __callchain__fprintf_graph()
143 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) in __callchain__fprintf_graph()
169 node = next; in __callchain__fprintf_graph()
/kernel/linux/linux-5.10/lib/
dynamic_debug.c
781 iter->table = list_entry(ddebug_tables.next, in ddebug_iter_first()
788 * Advance the iterator to point to the next _ddebug
798 /* iterate to next table */ in ddebug_iter_next()
804 iter->table = list_entry(iter->table->link.next, in ddebug_iter_next()
834 * Seq_ops next method. Called several times within a read()
836 * next _ddebug object with a special case for the header line.
890 .next = ddebug_proc_next,
1024 struct ddebug_table *dt = list_entry(ddebug_tables.next, in ddebug_remove_all_tables()
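
The dynamic_debug hits above follow the seq_file pattern: a .start callback positions an iterator and a .next callback advances it between show() calls across a read(). Below is a hedged, out-of-tree sketch of a seq_operations-based proc file in the same spirit, assuming a normal kernel module build; every demo_* name is invented and none of this is code from dynamic_debug.c.

    /* Hedged sketch: a tiny /proc file driven by a seq_operations .next. */
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static const char *demo_lines[] = { "first", "second", "third" };

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
        return *pos < ARRAY_SIZE(demo_lines) ? (void *)&demo_lines[*pos] : NULL;
    }

    /* Advance to the next element; returning NULL ends the walk. */
    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
        (*pos)++;
        return demo_start(m, pos);
    }

    static void demo_stop(struct seq_file *m, void *v)
    {
    }

    static int demo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "%s\n", *(const char **)v);
        return 0;
    }

    static const struct seq_operations demo_seq_ops = {
        .start = demo_start,
        .next  = demo_next,
        .stop  = demo_stop,
        .show  = demo_show,
    };

    static int __init demo_init(void)
    {
        if (!proc_create_seq("demo_next", 0444, NULL, &demo_seq_ops))
            return -ENOMEM;
        return 0;
    }

    static void __exit demo_exit(void)
    {
        remove_proc_entry("demo_next", NULL);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
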
/kernel/linux/linux-5.10/kernel/
padata.c
137 struct padata_work *cur, *next; in padata_works_free() local
143 list_for_each_entry_safe(cur, next, works, pw_list) { in padata_works_free()
244 * padata_find_next - Find the next object that needs serialization.
247 * * A pointer to the control struct of the next object that needs
249 * * NULL, if the next object that needs serialization will
268 padata = list_entry(reorder->list.next, struct padata_priv, list); in padata_find_next()
300 * queue the next object will arrive takes some time. A spinlock in padata_reorder()
314 * If the next object that needs serialization is parallel in padata_reorder()
334 * The next object that needs serialization might have arrived to in padata_reorder()
378 padata = list_entry(local_list.next, in padata_serial_worker()
[all...]
/kernel/linux/linux-5.10/kernel/locking/
mutex.c
1056 * state back to RUNNING and fall through the next schedule(), in __mutex_lock_common()
1230 struct task_struct *next = NULL; in __mutex_unlock_slowpath() local
1275 next = waiter->task; in __mutex_unlock_slowpath()
1278 wake_q_add(&wake_q, next); in __mutex_unlock_slowpath()
1282 __mutex_handoff(lock, next); in __mutex_unlock_slowpath()
/kernel/linux/linux-5.10/fs/overlayfs/
namei.c
314 const char *next = strchrnul(s, '/'); in ovl_lookup_layer() local
315 size_t thislen = next - s; in ovl_lookup_layer()
316 bool end = !next[0]; in ovl_lookup_layer()
323 d->name.len - rem, next, &base, in ovl_lookup_layer()
774 * Returns next layer in stack starting from top.
/kernel/linux/linux-5.10/fs/xfs/
xfs_log_cil.c
111 * don't free the old buffer as it may be reused on the next modification if
157 * of the next one is naturally aligned. We'll need to in xlog_cil_alloc_shadow_bufs()
493 struct xfs_log_vec *next = lv->lv_next; in xlog_cil_free_logvec() local
495 lv = next; in xlog_cil_free_logvec()
769 * next (e.g. due to log forces), we do not want the checkpoint with in xlog_cil_push_work()
1030 struct xfs_log_item *lip, *next; in xlog_cil_commit() local
1063 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { in xlog_cil_commit()
/kernel/linux/linux-5.10/net/dsa/
dsa2.c
643 struct dsa_link *dl, *next; in dsa_tree_teardown() local
654 list_for_each_entry_safe(dl, next, &dst->rtable, list) { in dsa_tree_teardown()
947 struct dsa_port *dp, *next; in dsa_switch_release_ports() local
949 list_for_each_entry_safe(dp, next, &dst->ports, list) { in dsa_switch_release_ports()
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/
nouveau_svm.c
172 unsigned long next; in nouveau_svmm_bind() local
179 next = min(vma->vm_end, end); in nouveau_svmm_bind()
182 next); in nouveau_svmm_bind()
183 addr = next; in nouveau_svmm_bind()
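
nouveau_svmm_bind() above walks an address range one VMA at a time, clamping each step's next boundary to min(vma->vm_end, end) and continuing from there. Below is a userspace sketch of that chunking loop; struct region, the regions[] table, and lookup_region() are invented stand-ins for the VMA lookup, and the sketch simply stops at the first hole.

    /* Hedged sketch of walking [addr, end) in per-region chunks. */
    #include <stdio.h>

    struct region {
        unsigned long start;
        unsigned long end;
    };

    /* Pretend address-space map; stands in for a find_vma()-style lookup. */
    static const struct region regions[] = {
        { 0x1000, 0x3000 },
        { 0x3000, 0x8000 },
        { 0x9000, 0xc000 },
    };

    static const struct region *lookup_region(unsigned long addr)
    {
        for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
            if (addr >= regions[i].start && addr < regions[i].end)
                return &regions[i];
        return NULL;
    }

    int main(void)
    {
        unsigned long addr = 0x1000, end = 0xb000, next;

        for (; addr < end; addr = next) {
            const struct region *r = lookup_region(addr);

            if (!r)
                break;  /* no region covers addr; stop for simplicity */
            next = r->end < end ? r->end : end;
            printf("process [%#lx, %#lx)\n", addr, next);
        }
        return 0;
    }
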
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/
vmwgfx_cmdbuf.c
15 * next paragraph) shall be included in all copies or substantial portions
381 struct vmw_cmdbuf_header *entry, *next; in vmw_cmdbuf_ctx_process() local
385 list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) { in vmw_cmdbuf_ctx_process()
516 struct vmw_cmdbuf_header *entry, *next; in vmw_cmdbuf_work_func() local
529 list_for_each_entry_safe(entry, next, &man->error, list) { in vmw_cmdbuf_work_func()
/kernel/linux/linux-5.10/drivers/media/usb/cpia2/
cpia2_usb.c
139 cam->workbuff = cam->workbuff->next; in process_frame()
233 for (ptr = cam->workbuff->next; in cpia2_usb_complete()
235 ptr = ptr->next) in cpia2_usb_complete()
739 cam->workbuff = cam->curbuff->next; in cpia2_usb_stream_start()
/kernel/linux/linux-5.10/drivers/media/platform/vsp1/
vsp1_dl.c
801 * next item for automatic processing by the hardware. in vsp1_dl_list_fill_header()
803 struct vsp1_dl_list *next = list_next_entry(dl, chain); in vsp1_dl_list_fill_header() local
805 dl->header->next_header = next->dma; in vsp1_dl_list_fill_header()
1000 * next frame end interrupt. in vsp1_dlm_irq_frame_end()
1157 struct vsp1_dl_list *dl, *next; in vsp1_dlm_destroy() local
1162 list_for_each_entry_safe(dl, next, &dlm->free, list) { in vsp1_dlm_destroy()
/kernel/linux/linux-5.10/drivers/nvme/host/
multipath.c
339 struct bio *bio, *next; in nvme_requeue_work() local
342 next = bio_list_get(&head->requeue_list); in nvme_requeue_work()
345 while ((bio = next) != NULL) { in nvme_requeue_work()
346 next = bio->bi_next; in nvme_requeue_work()

Completed in 37 milliseconds