/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
    mali_kbase_jd.c
         417  dep_atom = list_entry(katom->dep_head[d].next,  in jd_resolve_dep()
         419  list_del(katom->dep_head[d].next);  in jd_resolve_dep()
         648  * next chunk, we need to account for both.  in jd_update_jit_usage()
         766  node = list_entry(runnable_jobs.next,  in jd_done_nolock()
         768  list_del(runnable_jobs.next);  in jd_done_nolock()
        1569  kctx->completed_jobs.next,  in kbase_jd_done_worker()
        1571  list_del(kctx->completed_jobs.next);  in kbase_jd_done_worker()

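The jd_resolve_dep()/jd_done_nolock()/kbase_jd_done_worker() hits above are all the same kernel idiom: take the first node off a `struct list_head` queue with `list_entry(head.next, type, member)` and unlink it with `list_del(head.next)`. A minimal userspace sketch of that idiom, with local stand-ins for the `<linux/list.h>` helpers (the `struct job`/`jd_item` names here are illustrative, not the driver's types):

    #include <stddef.h>
    #include <stdio.h>

    /* Local stand-ins for <linux/list.h>; names mirror the kernel's. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    struct job { int id; struct list_head jd_item; };

    int main(void)
    {
        struct list_head runnable_jobs;
        struct job a = { .id = 1 }, b = { .id = 2 };

        INIT_LIST_HEAD(&runnable_jobs);
        list_add_tail(&a.jd_item, &runnable_jobs);
        list_add_tail(&b.jd_item, &runnable_jobs);

        /* The idiom seen in jd_done_nolock(): pop the first queued node. */
        while (runnable_jobs.next != &runnable_jobs) {
            struct job *node = list_entry(runnable_jobs.next, struct job, jd_item);
            list_del(runnable_jobs.next);
            printf("job %d\n", node->id);
        }
        return 0;
    }
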
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
    mali_kbase_jd.c
         395  dep_atom = list_entry(katom->dep_head[d].next, struct kbase_jd_atom, dep_item[d]);  in jd_resolve_dep()
         396  list_del(katom->dep_head[d].next);  in jd_resolve_dep()
         609  * next chunk, we need to account for both.  in jd_update_jit_usage()
         720  node = list_entry(runnable_jobs.next, struct kbase_jd_atom, jd_item);  in jd_done_nolock()
         721  list_del(runnable_jobs.next);  in jd_done_nolock()
        1462  struct kbase_jd_atom *atom = list_entry(kctx->completed_jobs.next, struct kbase_jd_atom, jd_item);  in kbase_jd_done_worker()
        1463  list_del(kctx->completed_jobs.next);  in kbase_jd_done_worker()

/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
    mali_kbase_jd.c
         554  dep_atom = list_entry(katom->dep_head[d].next,  in jd_resolve_dep()
         556  list_del(katom->dep_head[d].next);  in jd_resolve_dep()
         845  node = list_entry(runnable_jobs.next,  in jd_done_nolock()
         847  list_del(runnable_jobs.next);  in jd_done_nolock()
        1604  kctx->completed_jobs.next,  in kbase_jd_done_worker()
        1606  list_del(kctx->completed_jobs.next);  in kbase_jd_done_worker()

/kernel/linux/linux-5.10/drivers/firewire/
    sbp2.c
         307  struct sbp2_pointer next;  member
         515  struct sbp2_orb *orb, *next;  in sbp2_cancel_orbs() local
         524  list_for_each_entry_safe(orb, next, &list, link) {  in sbp2_cancel_orbs()
        1215  struct sbp2_logical_unit *lu, *next;  in sbp2_remove() local
        1223  list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {  in sbp2_remove()
        1458  orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);  in sbp2_scsi_queuecommand()

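sbp2_cancel_orbs() and sbp2_remove() both use `list_for_each_entry_safe()`, whose extra `next` cursor caches each node's successor before the loop body runs, so the body may unlink and free the current node. A self-contained sketch of the macro and the delete-while-iterating pattern (local re-implementation, not the kernel header):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal doubly linked list, mirroring <linux/list.h>. */
    struct list_head { struct list_head *next, *prev; };

    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Capture the successor in 'n' before the body runs (kernel semantics). */
    #define list_for_each_entry_safe(pos, n, head, member)                  \
        for (pos = list_entry((head)->next, __typeof__(*pos), member),      \
             n = list_entry(pos->member.next, __typeof__(*pos), member);    \
             &pos->member != (head);                                        \
             pos = n, n = list_entry(n->member.next, __typeof__(*n), member))

    struct orb { int id; struct list_head link; };

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    int main(void)
    {
        struct list_head list = { &list, &list };

        for (int i = 0; i < 3; i++) {
            struct orb *o = malloc(sizeof(*o));
            o->id = i;
            o->link.prev = list.prev;
            o->link.next = &list;
            list.prev->next = &o->link;
            list.prev = &o->link;
        }

        struct orb *orb, *next;
        list_for_each_entry_safe(orb, next, &list, link) {
            list_del(&orb->link);    /* safe: 'next' was captured already */
            printf("cancel orb %d\n", orb->id);
            free(orb);
        }
        return 0;
    }
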
/kernel/linux/linux-5.10/drivers/block/xen-blkback/
    blkback.c
         416  req = list_entry(ring->pending_free.next, struct pending_req,  in alloc_req()
         837  goto next;  in xen_blkbk_map()
         857  goto next;  in xen_blkbk_map()
         866  goto next;  in xen_blkbk_map()
         872  goto next;  in xen_blkbk_map()
         883  next:  in xen_blkbk_map()

/kernel/linux/linux-5.10/drivers/dma/
    owl-dma.c
         129  * @OWL_DMADESC_NEXT_LLI: physical address of the next link list
         365  struct owl_dma_lli *next,  in owl_dma_add_lli()
         369  list_add_tail(&next->node, &txd->lli_list);  in owl_dma_add_lli()
         372  prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys;  in owl_dma_add_lli()
         377  return next;  in owl_dma_add_lli()
         660  * Start the next descriptor (if any),  in owl_dma_interrupt()
         791  /* Start from the next active node */  in owl_dma_getbytes_chan()
        1052  struct owl_dma_vchan *next;  in owl_dma_free() local
        1055  next, &od->dma.channels, vc.chan.device_node) {  in owl_dma_free()
         363  owl_dma_add_lli(struct owl_dma_txd *txd, struct owl_dma_lli *prev, struct owl_dma_lli *next, bool is_cyclic)  owl_dma_add_lli() argument

/kernel/linux/linux-5.10/drivers/net/ethernet/i825xx/
    sun3_82586.c
         598  p->xmit_buffs[i]->next = 0xffff;  in init586()
         650  rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );  in alloc_rfa()
         665  rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));  in alloc_rfa()
         808  rbd = (struct rbd_struct *) make32(rbd->next);  in sun3_82586_rcv_int()
         826  p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */  in sun3_82586_rcv_int()
         850  printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);  in sun3_82586_rcv_int()
         866  rfds = (struct rfd_struct *) make32(rfds->next);  in sun3_82586_rcv_int()
        1092  /* linkpointer of xmit-command already points to next nop cmd */  in sun3_82586_send_packet()

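alloc_rfa() links the receive descriptors into a ring: each `next` field points at the following descriptor modulo the buffer count, with `make16()` translating a CPU pointer into the 82586's 16-bit bus offset. A hedged sketch of the same wrap-around linking, with plain array indices standing in for the hardware address translation:

    #include <stdio.h>

    #define NUM_RECV_BUFFS 4

    /* Simplified receive-frame descriptor: only the ring link matters here. */
    struct rfd_struct {
        unsigned short next;   /* index of the next descriptor in the ring */
        unsigned short status;
    };

    int main(void)
    {
        struct rfd_struct rfd[NUM_RECV_BUFFS] = { 0 };

        /* Same wrap-around linking as alloc_rfa(): i -> (i + 1) % n. */
        for (int i = 0; i < NUM_RECV_BUFFS; i++)
            rfd[i].next = (unsigned short)((i + 1) % NUM_RECV_BUFFS);

        /* Walking the ring from any slot eventually revisits every slot. */
        unsigned short top = 0;
        for (int step = 0; step < 2 * NUM_RECV_BUFFS; step++) {
            printf("rfd %u -> %u\n", top, rfd[top].next);
            top = rfd[top].next;   /* "step to next RFD" */
        }
        return 0;
    }
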
/kernel/linux/linux-5.10/tools/testing/selftests/rseq/
    param_test.c
         283  struct percpu_list_node *next;  member
         528  node->next = (struct percpu_list_node *)expect;  in this_cpu_list_push()
         558  offset = offsetof(struct percpu_list_node, next);  in this_cpu_list_pop()
         586  list->c[cpu].head = node->next;  in __percpu_list_pop()
         642  node->next = list.c[i].head;  in test_percpu_list()
        1186  node->next = NULL;  in test_membarrier_init_percpu_list()

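this_cpu_list_push() stores the expected old head into `node->next` and then publishes the node with a restartable-sequence (rseq) commit; pinning each list to one CPU is what lets it avoid the ABA problem of a shared CAS stack. A portable sketch of the same push/pop shape, using a C11 compare-and-swap loop in place of rseq (sketch only, not the selftest's mechanism):

    #include <stdatomic.h>
    #include <stdio.h>

    struct node { int v; struct node *next; };

    /* Push: link the new node to the head we expect, then try to commit,
     * mirroring node->next = (struct percpu_list_node *)expect. */
    static void push(struct node *_Atomic *head, struct node *n)
    {
        struct node *expect = atomic_load(head);
        do {
            n->next = expect;
        } while (!atomic_compare_exchange_weak(head, &expect, n));
    }

    /* Pop: swing the head to head->next. Without rseq's per-CPU guarantee
     * this is ABA-prone; it is a sketch, not a production stack. */
    static struct node *pop(struct node *_Atomic *head)
    {
        struct node *expect = atomic_load(head);
        do {
            if (!expect)
                return NULL;
        } while (!atomic_compare_exchange_weak(head, &expect, expect->next));
        return expect;
    }

    int main(void)
    {
        static struct node *_Atomic head;
        struct node a = { .v = 1 }, b = { .v = 2 };

        push(&head, &a);
        push(&head, &b);
        for (struct node *n; (n = pop(&head)) != NULL; )
            printf("popped %d\n", n->v);
        return 0;
    }
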
/kernel/linux/linux-5.10/drivers/misc/habanalabs/common/
    command_submission.c
         312  struct hl_cs *next;  in cs_do_release() local
         319  /* queue TDR for next CS */  in cs_do_release()
         320  next = list_first_entry_or_null(  in cs_do_release()
         324  if ((next) && (!next->tdr_active)) {  in cs_do_release()
         325  next->tdr_active = true;  in cs_do_release()
         326  schedule_delayed_work(&next->work_tdr,  in cs_do_release()

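cs_do_release() arms the timeout watchdog (TDR) for the next queued command submission via `list_first_entry_or_null()`, which yields the head entry or NULL for an empty list, avoiding a separate list_empty() check. A minimal sketch of the helper's semantics (local re-implementation; the struct and member names are illustrative, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Head element, or NULL when the list is empty (mirrors the kernel helper). */
    #define list_first_entry_or_null(head, type, member) \
        ((head)->next != (head) ? container_of((head)->next, type, member) : NULL)

    struct cs_sketch { int seq; int tdr_active; struct list_head node; };

    int main(void)
    {
        struct list_head pending = { &pending, &pending };
        struct cs_sketch cs = { .seq = 7 };

        struct cs_sketch *next =
            list_first_entry_or_null(&pending, struct cs_sketch, node);
        printf("empty list -> %p\n", (void *)next);   /* NULL: nothing to arm */

        /* Queue one CS, then pick it as the next TDR candidate. */
        cs.node = (struct list_head){ &pending, &pending };
        pending.next = pending.prev = &cs.node;

        next = list_first_entry_or_null(&pending, struct cs_sketch, node);
        if (next && !next->tdr_active)                /* as in cs_do_release() */
            printf("arm TDR for cs %d\n", next->seq);
        return 0;
    }
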
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/
    rxe_verbs.c
         368  wr = wr->next;  in rxe_post_srq_recv()
         650  struct ib_send_wr *next;  in rxe_post_send_kernel() local
         667  next = wr->next;  in rxe_post_send_kernel()
         679  wr = next;  in rxe_post_send_kernel()
         740  wr = wr->next;  in rxe_post_recv()

/kernel/linux/linux-5.10/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
    usb.c
         411  req = list_entry(q->next, struct brcmf_usbreq, list);  in brcmf_usb_deq()
         412  list_del_init(q->next);  in brcmf_usb_deq()
         457  req = list_entry(q->next, struct brcmf_usbreq, list);  in brcmf_usbdev_qinit()
         460  list_del(q->next);  in brcmf_usbdev_qinit()
         469  struct brcmf_usbreq *req, *next;  in brcmf_usb_free_q() local
         471  list_for_each_entry_safe(req, next, q, list) {  in brcmf_usb_free_q()

/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb3/
    cxgb3_offload.c
         508  p->next = t->afree;  in cxgb3_free_atid()
         527  p->next = t->sfree;  in cxgb3_free_stid()
         603  /* use ctx as a next pointer in the tid release list */
         663  t->afree = p->next;  in cxgb3_alloc_atid()
         685  t->sfree = p->next;  in cxgb3_alloc_stid()
        1187  t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];  in init_tid_tabs()
        1192  t->atid_tab[natids - 1].next = &t->atid_tab[natids];  in init_tid_tabs()

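The atid/stid tables double as their own free lists: init_tid_tabs() chains every slot to its successor, cxgb3_alloc_atid() pops the head (`t->afree = p->next`), and cxgb3_free_atid() pushes it back. A self-contained sketch of that table-as-free-list pattern (the real driver ends the chain in a sentinel one past the table, as the 1187/1192 hits show; this sketch uses NULL, and the union layout is illustrative):

    #include <stdio.h>

    #define NATIDS 4

    /* Each slot is either in use (holding a context pointer) or free
     * (holding the link to the next free slot). */
    union atid_entry {
        void *ctx;
        union atid_entry *next;
    };

    struct tid_info {
        union atid_entry atid_tab[NATIDS];
        union atid_entry *afree;    /* head of the free list */
    };

    static void init_tid_tabs(struct tid_info *t)
    {
        for (int i = 0; i < NATIDS - 1; i++)
            t->atid_tab[i].next = &t->atid_tab[i + 1];
        t->atid_tab[NATIDS - 1].next = NULL;
        t->afree = &t->atid_tab[0];
    }

    static union atid_entry *alloc_atid(struct tid_info *t)
    {
        union atid_entry *p = t->afree;
        if (p)
            t->afree = p->next;     /* pop: t->afree = p->next */
        return p;
    }

    static void free_atid(struct tid_info *t, union atid_entry *p)
    {
        p->next = t->afree;         /* push: p->next = t->afree */
        t->afree = p;
    }

    int main(void)
    {
        static struct tid_info t;

        init_tid_tabs(&t);
        union atid_entry *a = alloc_atid(&t);
        union atid_entry *b = alloc_atid(&t);
        printf("allocated slots %ld and %ld\n",
               (long)(a - t.atid_tab), (long)(b - t.atid_tab));
        free_atid(&t, a);
        free_atid(&t, b);
        return 0;
    }
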
/kernel/linux/linux-5.10/drivers/net/wireless/intel/iwlwifi/queue/
    tx.c
         304  * then the next TB may be accessed with the wrong  in iwl_txq_gen2_set_tb_with_wa()
         495  /* prepare the start_hdr for the next subframe */  in iwl_txq_gen2_build_amsdu()
        1033  struct page *next;  in iwl_txq_free_tso_page() local
        1036  next = *page_ptr;  in iwl_txq_free_tso_page()
        1039  while (next) {  in iwl_txq_free_tso_page()
        1040  struct page *tmp = next;  in iwl_txq_free_tso_page()
        1042  next = *(void **)(page_address(next) + PAGE_SIZE -  in iwl_txq_free_tso_page()

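iwl_txq_free_tso_page() walks a chain of pages whose successor pointer is stored in the last `sizeof(void *)` bytes of each page itself, as the 1042 hit shows. A userspace analogue with heap buffers standing in for pages (names and sizes are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define BUF_SIZE 4096   /* stand-in for PAGE_SIZE */

    /* The link to the next buffer lives inside the buffer, in its tail:
     * like *(void **)(page_address(next) + PAGE_SIZE - sizeof(void *)). */
    static void **tail_link(void *buf)
    {
        return (void **)((char *)buf + BUF_SIZE - sizeof(void *));
    }

    int main(void)
    {
        void *head = NULL;

        /* Build a three-buffer chain, newest first. */
        for (int i = 0; i < 3; i++) {
            void *buf = malloc(BUF_SIZE);
            *tail_link(buf) = head;
            head = buf;
        }

        /* Tear the chain down: read each link *before* freeing its buffer. */
        void *next = head;
        while (next) {
            void *tmp = next;
            next = *tail_link(next);
            printf("freeing %p\n", tmp);
            free(tmp);
        }
        return 0;
    }
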
/kernel/linux/linux-5.10/drivers/net/hamradio/
    yam.c
         138  struct yam_mcs *next;  member
         385  p = p->next;  in add_mcs()
         395  p->next = yam_data;  in add_mcs()
         410  p = p->next;  in get_mcs()
         825  .next = yam_seq_next,
        1173  yam_data = yam_data->next;  in yam_cleanup_driver()

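add_mcs(), get_mcs() and yam_cleanup_driver() manage a hand-rolled singly linked global list through the `next` member: walk to find an existing entry, push a new node on the head, pop to free everything. A compact userspace sketch of that lifecycle (field names simplified from `struct yam_mcs`):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified yam_mcs: a keyed node in a global chain. */
    struct mcs {
        int bitrate;
        struct mcs *next;
    };

    static struct mcs *mcs_data;   /* global head, like yam_data in yam.c */

    static struct mcs *add_mcs(int bitrate)
    {
        /* Walk the chain looking for an existing entry first. */
        for (struct mcs *p = mcs_data; p; p = p->next)
            if (p->bitrate == bitrate)
                return p;

        /* Not found: push a new node onto the head. */
        struct mcs *p = malloc(sizeof(*p));
        p->bitrate = bitrate;
        p->next = mcs_data;
        mcs_data = p;
        return p;
    }

    static void cleanup(void)
    {
        /* Pop-and-free until empty, as yam_cleanup_driver() does. */
        while (mcs_data) {
            struct mcs *p = mcs_data;
            mcs_data = mcs_data->next;
            free(p);
        }
    }

    int main(void)
    {
        add_mcs(1200);
        add_mcs(9600);
        add_mcs(1200);             /* duplicate: returns the existing node */
        for (struct mcs *p = mcs_data; p; p = p->next)
            printf("mcs %d\n", p->bitrate);
        cleanup();
        return 0;
    }
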
/kernel/linux/linux-5.10/drivers/scsi/fnic/
    fnic_fcs.c
         228  struct fnic_event *next = NULL;  in fnic_fcoe_evlist_free() local
         237  list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {  in fnic_fcoe_evlist_free()
         248  struct fnic_event *next = NULL;  in fnic_handle_event() local
         257  list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {  in fnic_handle_event()
        1306  struct fcoe_vlan *next;  in fnic_fcoe_reset_vlans() local
        1315  list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {  in fnic_fcoe_reset_vlans()
        1373  * Try the next vlan  in fnic_handle_fip_timer()
        1391  /* check the next vlan */  in fnic_handle_fip_timer()

/kernel/linux/linux-5.10/drivers/pci/hotplug/
    ibmphp_ebda.c
         267  "ibmphp_ebda: next read is beyond ebda_sz\n"))  in ibmphp_access_ebda()
         270  next_offset = readw(io_mem + offset); /* offset of next blk */  in ibmphp_access_ebda()
         295  re = readw(io_mem + sub_addr); /* next sub blk */  in ibmphp_access_ebda()
         322  rc = readw(io_mem + sub_addr); /* next sub blk */  in ibmphp_access_ebda()
        1033  struct bus_info *bus_info, *next;  in ibmphp_free_bus_info_queue() local
        1035  list_for_each_entry_safe(bus_info, next, &bus_info_head,  in ibmphp_free_bus_info_queue()
        1043  struct controller *controller = NULL, *next;  in ibmphp_free_ebda_hpc_queue() local
        1046  list_for_each_entry_safe(controller, next, &ebda_hpc_head,  in ibmphp_free_ebda_hpc_queue()
        1060  struct ebda_pci_rsrc *resource, *next;  in ibmphp_free_ebda_pci_rsrc_queue() local
        1062  list_for_each_entry_safe(resource, next,  in ibmphp_free_ebda_pci_rsrc_queue()
        [all...]

/kernel/linux/linux-5.10/drivers/scsi/
    scsi_scan.c
         165  struct async_scan_data *next = list_entry(scanning_hosts.next,  in scsi_complete_async_scans() local
         167  complete(&next->prev_finished);  in scsi_complete_async_scans()
        1796  struct async_scan_data *next = list_entry(scanning_hosts.next,  in scsi_finish_async_scan() local
        1798  complete(&next->prev_finished);  in scsi_finish_async_scan()

/kernel/linux/linux-5.10/drivers/tty/serial/
    ip22zilog.c
          66  struct uart_ip22zilog_port *next;  member
         229  * until the next TX complete interrupt. Else, we do it right now.
         364  /* TX still busy? Just wait for the next TX done interrupt.  in ip22zilog_transmit_chars()
         458  up = up->next;  in ip22zilog_interrupt()
         480  up = up->next;  in ip22zilog_interrupt()
         708  up->next->flags |= IP22ZILOG_FLAG_RESET_DONE;  in __ip22zilog_reset()
        1093  up[channel].next = &up[channel - 1];  in ip22zilog_prepare()
        1094  up[channel].next = NULL;  in ip22zilog_prepare()

/kernel/linux/linux-5.10/drivers/usb/gadget/udc/
    s3c-hsudc.c
         224  hsreq = list_entry(hsep->queue.next,  in s3c_hsudc_nuke_ep()
         272  * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
         274  * @hsreq: Transfer request from which the next chunk of data is written.
         276  * Write the next chunk of data from a transfer request to the endpoint FIFO.
         317  * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
         319  * @hsreq: Transfer request to which the next chunk of data read is written.
         321  * Read the next chunk of data from the endpoint FIFO and a write it to the
         395  hsreq = list_entry(hsep->queue.next,  in s3c_hsudc_epin_intr()
         432  hsreq = list_entry(hsep->queue.next,  in s3c_hsudc_epout_intr()
         477  hsreq = list_entry(hsep->queue.next,  in s3c_hsudc_set_halt()
        [all...]

/kernel/linux/linux-5.10/net/sched/
    sch_netem.c
         153  * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
         154  * and skb->next & skb->prev are scratch space for a qdisc,
         163  /* we assume we can use skb next/prev/tstamp as storage for rb_node */  in netem_skb_cb()
         208  * next state and if the next packet has to be transmitted or lost.  in loss_4state()
         261  * next state. A second random number is extracted and the comparison
         262  * with the loss probability of the current state decides if the next
         385  q->t_tail->next = nskb;  in tfifo_enqueue()
         499  segs = skb->next;  in netem_enqueue()
         522  skb->next  in netem_enqueue()
        [all...]

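The comments at the 153/154/163 hits explain netem's trick: for a queued skb, `next`, `prev` and `tstamp` are qdisc scratch space, so the same bytes can hold an `rb_node` when the qdisc files the skb into an rb-tree instead of a list. A sketch of the shape of that overlay, showing why the list and tree linkages are mutually exclusive (layout is illustrative, not the real `struct sk_buff`):

    #include <stdio.h>

    /* An rb_node-sized member overlaid on the fields it reuses as storage. */
    struct rb_node_sketch { unsigned long parent_color; void *left, *right; };

    struct skb_sketch {
        union {
            struct {
                struct skb_sketch *next;
                struct skb_sketch *prev;
                unsigned long long tstamp;
            };
            struct rb_node_sketch rbnode;   /* overlays all three fields */
        };
        int len;
    };

    int main(void)
    {
        struct skb_sketch skb = { .len = 100 };

        /* While queued on a plain list, next/prev/tstamp are meaningful... */
        skb.next = &skb;
        printf("next = %p\n", (void *)skb.next);

        /* ...but filing the skb into an rb-tree clobbers them, which is why
         * the two linkages must never be used at the same time. */
        skb.rbnode = (struct rb_node_sketch){ 0 };
        printf("next after rbnode use = %p\n", (void *)skb.next);
        return 0;
    }
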
/kernel/linux/linux-5.10/net/xfrm/
    xfrm_interface_core.c
          62  for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
         121  rcu_assign_pointer(xi->next , rtnl_dereference(*xip));  in xfrmi_link()
         132  xip = &iter->next)  in xfrmi_unlink()
         134  rcu_assign_pointer(*xip, xi->next);  in xfrmi_unlink()
         176  xip = &xi->next)  in xfrmi_locate()
         812  xip = &xi->next)  in xfrmi_exit_batch_net()

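xfrmi_link()/xfrmi_unlink() maintain an RCU-protected singly linked list: readers follow `next` through rcu_dereference(), and writers splice with rcu_assign_pointer() on the `xip` slot that points at the node, so readers always see either the old or the new link. A userspace analogue using C11 release stores and acquire loads in place of the RCU primitives (a sketch only; real reclamation would also need a grace period, omitted here):

    #include <stdatomic.h>
    #include <stdio.h>

    struct xi { int id; struct xi *_Atomic next; };

    static struct xi *_Atomic head;

    /* Publish a node at the head: the release store plays the role of
     * rcu_assign_pointer() in xfrmi_link(). */
    static void link_head(struct xi *n)
    {
        n->next = atomic_load(&head);
        atomic_store_explicit(&head, n, memory_order_release);
    }

    /* Walk the chain of 'next' slots (the xip pointer-to-slot pattern)
     * until we find the slot pointing at n, then splice it out. */
    static void unlink_node(struct xi *n)
    {
        struct xi *_Atomic *xip = &head;
        for (struct xi *it = atomic_load(xip); it;
             xip = &it->next, it = atomic_load(xip)) {
            if (it == n) {
                atomic_store_explicit(xip, atomic_load(&n->next),
                                      memory_order_release);
                return;
            }
        }
    }

    int main(void)
    {
        struct xi a = { .id = 1 }, b = { .id = 2 };

        link_head(&a);
        link_head(&b);
        unlink_node(&a);

        /* Reader-side walk: acquire loads stand in for rcu_dereference(). */
        for (struct xi *it = atomic_load_explicit(&head, memory_order_acquire);
             it; it = atomic_load_explicit(&it->next, memory_order_acquire))
            printf("xi %d\n", it->id);
        return 0;
    }
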
/kernel/linux/linux-5.10/fs/nfs/
    pagelist.c
          64  hdr->req = nfs_list_entry(mirror->pg_list.next);  in nfs_pgheader_init()
         414  struct nfs_page *tmp, *next;  in nfs_page_group_destroy() local
         421  next = tmp->wb_this_page;  in nfs_page_group_destroy()
         426  tmp = next;  in nfs_page_group_destroy()
         926  req = nfs_list_entry(head->next);  in nfs_generic_pgio()
        1373  struct nfs_page *req = nfs_list_entry(pages.next);  in nfs_pageio_resend()

/kernel/linux/linux-5.10/kernel/trace/
    trace_output.c
         688  int next = __TRACE_LAST_TYPE;  in trace_search_list() local
         692  return next;  in trace_search_list()
         700  if (e->type != next)  in trace_search_list()
         702  next++;  in trace_search_list()
         706  if (next > TRACE_EVENT_TYPE_MAX)  in trace_search_list()
         710  return next;  in trace_search_list()

/kernel/linux/linux-5.10/fs/ocfs2/
    quota_local.c
         239  struct ocfs2_quota_chunk *pos, *next;  in ocfs2_release_local_quota_bitmaps() local
         241  list_for_each_entry_safe(pos, next, head, qc_chunk) {  in ocfs2_release_local_quota_bitmaps()
         318  struct ocfs2_recovery_chunk *next;  in free_recovery_list() local
         321  list_for_each_entry_safe(rchunk, next, head, rc_list) {  in free_recovery_list()
         471  struct ocfs2_recovery_chunk *rchunk, *next;  in ocfs2_recover_local_quota_file() local
         476  list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) {  in ocfs2_recover_local_quota_file()

/kernel/linux/linux-5.10/net/dccp/
    feat.c
         543  struct dccp_feat_entry *entry, *next;  in dccp_feat_list_purge() local
         545  list_for_each_entry_safe(entry, next, fn_list, node)  in dccp_feat_list_purge()
         629  * @skb: next sk_buff to be sent to the peer
         637  struct dccp_feat_entry *pos, *next;  in dccp_feat_insert_opts() local
         642  list_for_each_entry_safe_reverse(pos, next, fn, node) {  in dccp_feat_insert_opts()
        1507  struct dccp_feat_entry *cur, *next;  in dccp_feat_activate_values() local
        1551  list_for_each_entry_safe(cur, next, fn_list, node)  in dccp_feat_activate_values()