Code search results for the identifier "next", one block per file. Each match gives the source line number, the matching line, and the enclosing function where the indexer reported one; "..." marks snippets the indexer cut off, and "[more matches not shown]" stands for results truncated from the page.

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/qib_rc.c
  98:   /* Check for no next entry in the queue. */   in qib_make_rc_ack()
  853:  * Set qp->s_sending_psn to the next PSN after the given one.
  1114: * continue with the next packet the receiver wants.   in do_rc_ack()
  1689: unsigned next;   in qib_update_ack_queue() (local)
  1691: next = n + 1;   in qib_update_ack_queue()
  1692: if (next > QIB_MAX_RDMA_ATOMIC)   in qib_update_ack_queue()
  1693: next = 0;   in qib_update_ack_queue()
  1694: qp->s_tail_ack_queue = next;   in qib_update_ack_queue()
  1939: u8 next;   in qib_rc_rcv() (local)
  1943: next ...   in qib_rc_rcv()
  2011: u8 next;   in qib_rc_rcv() (local)
  [more matches not shown]
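The qib_update_ack_queue() hits show a common ring-index idiom: increment, then wrap to 0 once the index passes the bound, giving the ack queue QIB_MAX_RDMA_ATOMIC + 1 usable slots. A minimal standalone sketch of the same advance-and-wrap step (the bound here is an illustrative stand-in for QIB_MAX_RDMA_ATOMIC):

```c
#include <stdio.h>

#define MAX_SLOT 7      /* illustrative; the driver uses QIB_MAX_RDMA_ATOMIC */

/* Advance a ring index, wrapping to 0 past the last slot, the same way
 * qib_update_ack_queue() advances qp->s_tail_ack_queue. */
static unsigned ring_advance(unsigned n)
{
        unsigned next = n + 1;

        if (next > MAX_SLOT)
                next = 0;
        return next;
}

int main(void)
{
        unsigned idx = 0;

        for (int i = 0; i < 20; i++) {
                printf("%u ", idx);     /* 0..7 then wraps back to 0 */
                idx = ring_advance(idx);
        }
        printf("\n");
        return 0;
}
```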
/kernel/linux/linux-5.10/drivers/scsi/scsi_error.c
  202:  * Retry after abort failed, escalate to next level.   in scsi_abort_command()
  716:  * get to try and figure out what to do next. if the sense   in scsi_eh_completed_normally()
  726:  * next, check the message byte.   in scsi_eh_completed_normally()
  1222: struct scsi_cmnd *scmd, *next;   in scsi_eh_get_sense() (local)
  1230: list_for_each_entry_safe(scmd, next, work_q, eh_entry) {   in scsi_eh_get_sense()
  1341: struct scsi_cmnd *scmd, *next;   in scsi_eh_test_devices() (local)
  1346: scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);   in scsi_eh_test_devices()
  1366: list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)   in scsi_eh_test_devices()
  1418: struct scsi_cmnd *scmd, *stu_scmd, *next;   in scsi_eh_stu() (local)
  1449: list_for_each_entry_safe(scmd, next, ...   in scsi_eh_stu()
  1484: struct scsi_cmnd *scmd, *bdr_scmd, *next;   in scsi_eh_bus_device_reset() (local)
  1552: struct scsi_cmnd *next, *scmd;   in scsi_eh_target_reset() (local)
  1608: struct scsi_cmnd *scmd, *chan_scmd, *next;   in scsi_eh_bus_reset() (local)
  1680: struct scsi_cmnd *scmd, *next;   in scsi_eh_host_reset() (local)
  1718: struct scsi_cmnd *scmd, *next;   in scsi_eh_offline_sdevs() (local)
  2100: struct scsi_cmnd *scmd, *next;   in scsi_eh_flush_done_q() (local)
  [more matches not shown]
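Nearly every hit in scsi_error.c is the list_for_each_entry_safe() pattern: a second cursor named next is loaded before the loop body runs, so the body can unlink or complete the current command without losing its place. A userspace analogue of the same "save next before touching current" walk, on a plain singly linked list (the struct and names are illustrative, not the kernel's):

```c
#include <stdio.h>
#include <stdlib.h>

struct cmd {
        int id;
        struct cmd *next;
};

/* Complete and free every entry; 'next' is captured before the body so
 * freeing the current node is safe, the reason list_for_each_entry_safe()
 * carries a second cursor. */
static void flush_all(struct cmd *head)
{
        struct cmd *scmd, *next;

        for (scmd = head; scmd; scmd = next) {
                next = scmd->next;      /* grab it before scmd is freed */
                printf("completing cmd %d\n", scmd->id);
                free(scmd);
        }
}

int main(void)
{
        struct cmd *head = NULL;

        for (int i = 3; i > 0; i--) {   /* build 1 -> 2 -> 3 */
                struct cmd *c = malloc(sizeof(*c));
                c->id = i;
                c->next = head;
                head = c;
        }
        flush_all(head);
        return 0;
}
```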
/kernel/linux/linux-5.10/security/smack/smackfs.c
  632:  .next = load2_seq_next,
  800:  .next = cipso_seq_next,
  996:  .next = cipso_seq_next,
  1072: .next = net4addr_seq_next,
  1109: m = list_entry_rcu(smk_net4addr_list.next, ...   in smk_net4addr_insert()
  1123: m_next = list_entry_rcu(m->list.next, ...   in smk_net4addr_insert()
  1336: .next = net6addr_seq_next,
  1373: m = list_entry_rcu(smk_net6addr_list.next, ...   in smk_net6addr_insert()
  1386: m_next = list_entry_rcu(m->list.next, ...   in smk_net6addr_insert()
  1900: .next ...
  [more matches not shown]
/kernel/linux/linux-5.10/tools/power/x86/intel-speed-select/isst-config.c
  2343: char *next;   in parse_cpu_command() (local)
  2345: next = optarg;   in parse_cpu_command()
  2347: while (next && *next) {   in parse_cpu_command()
  2348: if (*next == '-') /* no negative cpu numbers */   in parse_cpu_command()
  2351: start = strtoul(next, &next, 10);   in parse_cpu_command()
  2356: if (*next == '\0')   in parse_cpu_command()
  2359: if (*next == ',') {   in parse_cpu_command()
  2360: next ...   in parse_cpu_command()
  [more matches not shown]
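parse_cpu_command() scans a cpu list such as "0,3,5-7": strtoul() advances next past each number, and the loop dispatches on whichever delimiter stopped the conversion. A simplified, self-contained version of the same loop (error handling and isst-config.c's per-cpu bookkeeping are omitted; this is a sketch of the parsing shape, not the tool's code):

```c
#include <stdio.h>
#include <stdlib.h>

/* Parse "0,3,5-7"-style cpu lists in the style of parse_cpu_command(). */
static void parse_cpu_list(const char *arg)
{
        char *next = (char *)arg;
        unsigned long start, end;

        while (next && *next) {
                if (*next == '-')       /* no negative cpu numbers */
                        return;

                start = strtoul(next, &next, 10);
                printf("cpu %lu\n", start);

                if (*next == '\0')
                        break;
                if (*next == ',') {
                        next++;
                        continue;
                }
                if (*next == '-') {
                        next++;         /* skip the '-' of a range */
                        end = strtoul(next, &next, 10);
                        while (++start <= end)
                                printf("cpu %lu\n", start);
                        if (*next == ',')
                                next++;
                }
        }
}

int main(void)
{
        parse_cpu_list("0,3,5-7");      /* prints cpus 0, 3, 5, 6, 7 */
        return 0;
}
```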
/kernel/linux/linux-5.10/fs/btrfs/raid56.c
  94:   * the stripe lock to the next pending IO
  434:  rbio = list_entry(table->stripe_cache.next, ...   in btrfs_clear_rbio_cache()
  666:  * the next rbio on the list is run and the IO is started automatically.
  755:  * rbios waiting for this stripe, the next one on the list will be started
  796:  struct btrfs_raid_bio *next;   in unlock_stripe() (local)
  797:  struct list_head *head = rbio->plug_list.next;   in unlock_stripe()
  799:  next = list_entry(head, struct btrfs_raid_bio, ...   in unlock_stripe()
  804:  list_add(&next->hash_list, &h->hash_list);   in unlock_stripe()
  805:  refcount_inc(&next->refs);   in unlock_stripe()
  809:  if (next ...   in unlock_stripe()
  858:  struct bio *next;   in rbio_endio_bio_list() (local)
  [more matches not shown]
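unlock_stripe() illustrates lock handoff: rather than dropping the stripe lock and letting waiters race for it, the owner promotes the next rbio queued on its plug_list and starts that IO directly. A toy, single-threaded model of the handoff idea (invented names; the real code also manages a hash table, refcounts, and async work):

```c
#include <stdio.h>
#include <stddef.h>

struct waiter {
        const char *name;
        struct waiter *next;    /* rbios plugged behind the current owner */
};

/* Hand the resource to the next queued waiter, if any, and return it;
 * the old owner simply drops out, as in unlock_stripe(). */
static struct waiter *unlock_handoff(struct waiter *owner)
{
        struct waiter *next = owner->next;

        if (next)
                printf("%s hands the stripe lock to %s\n",
                       owner->name, next->name);
        else
                printf("%s unlocks; no one is waiting\n", owner->name);
        return next;
}

int main(void)
{
        struct waiter c = { "rbio-c", NULL };
        struct waiter b = { "rbio-b", &c };
        struct waiter a = { "rbio-a", &b };

        for (struct waiter *owner = &a; owner; )
                owner = unlock_handoff(owner);
        return 0;
}
```

The design point is fairness and batching: the lock never becomes free-for-all, so queued IO starts in submission order without a wakeup storm.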
/kernel/linux/linux-5.10/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
  286:  const struct misensor_reg *next)   in __mt9m114_buf_reg_array()
  293:  if (ctrl->index + next->length >= MT9M114_MAX_WRITE_BUF_SIZE) {   in __mt9m114_buf_reg_array()
  299:  switch (next->length) {   in __mt9m114_buf_reg_array()
  301:  ctrl->buffer.data[ctrl->index] = (u8)next->val;   in __mt9m114_buf_reg_array()
  305:  *data16 = cpu_to_be16((u16)next->val);   in __mt9m114_buf_reg_array()
  309:  *data32 = cpu_to_be32(next->val);   in __mt9m114_buf_reg_array()
  317:  ctrl->buffer.addr = next->reg;   in __mt9m114_buf_reg_array()
  319:  ctrl->index += next->length;   in __mt9m114_buf_reg_array()
  327:  const struct misensor_reg *next)   in __mt9m114_write_reg_is_consecutive()
  332:  return ctrl->buffer.addr + ctrl->index == next ...   in __mt9m114_write_reg_is_consecutive()
  284:  __mt9m114_buf_reg_array(struct i2c_client *client, struct mt9m114_write_ctrl *ctrl, const struct misensor_reg *next)   (argument)
  325:  __mt9m114_write_reg_is_consecutive(struct i2c_client *client, struct mt9m114_write_ctrl *ctrl, const struct misensor_reg *next)   (argument)
  353:  const struct misensor_reg *next = reglist;   in mt9m114_write_reg_array() (local)
  [more matches not shown]
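These hits implement write coalescing: a register write is appended to a pending I2C buffer only when its address lands exactly where the buffered block ends (ctrl->buffer.addr + ctrl->index == next->reg); otherwise the buffer is flushed and a new block starts. A stripped-down sketch of the consecutiveness test and buffering (all structs and the buffer size are invented for illustration; the real driver also byte-swaps values with cpu_to_be16/32 and flushes over I2C):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_WRITE_BUF 32        /* illustrative, cf. MT9M114_MAX_WRITE_BUF_SIZE */

struct write_ctrl {
        uint16_t addr;                  /* register address of buffer start */
        uint8_t  data[MAX_WRITE_BUF];
        int      index;                 /* bytes buffered so far */
};

struct reg_write {
        uint16_t reg;
        uint32_t val;
        int      length;                /* 1, 2 or 4 bytes */
};

/* True when the write lands exactly at the end of the buffered block,
 * the same test as __mt9m114_write_reg_is_consecutive(). */
static int is_consecutive(const struct write_ctrl *ctrl,
                          const struct reg_write *next)
{
        return ctrl->index == 0 || ctrl->addr + ctrl->index == next->reg;
}

/* Append a write; start a fresh block when it is not consecutive or the
 * buffer would overflow (the real driver flushes to I2C at that point). */
static void buf_reg(struct write_ctrl *ctrl, const struct reg_write *next)
{
        if (!is_consecutive(ctrl, next) ||
            ctrl->index + next->length >= MAX_WRITE_BUF) {
                printf("flush %d bytes at 0x%04x\n", ctrl->index, ctrl->addr);
                ctrl->index = 0;
        }
        if (ctrl->index == 0)
                ctrl->addr = next->reg;
        /* real code stores big-endian; a plain copy keeps the sketch short */
        memcpy(&ctrl->data[ctrl->index], &next->val, next->length);
        ctrl->index += next->length;
}

int main(void)
{
        struct write_ctrl ctrl = { 0 };
        struct reg_write writes[] = {
                { 0x1000, 0xaa, 1 }, { 0x1001, 0xbb, 1 },  /* coalesced */
                { 0x2000, 0xcc, 2 },                       /* new block  */
        };

        for (unsigned i = 0; i < sizeof(writes) / sizeof(writes[0]); i++)
                buf_reg(&ctrl, &writes[i]);
        printf("flush %d bytes at 0x%04x\n", ctrl.index, ctrl.addr);
        return 0;
}
```

Coalescing trades a little bookkeeping for far fewer I2C transactions, which dominates sensor-configuration time.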
/kernel/linux/linux-6.6/drivers/scsi/scsi_error.c
  241:  * Retry after abort failed, escalate to next level.   in scsi_abort_command()
  798:  * get to try and figure out what to do next. if the sense   in scsi_eh_completed_normally()
  1306: struct scsi_cmnd *scmd, *next;   in scsi_eh_get_sense() (local)
  1314: list_for_each_entry_safe(scmd, next, work_q, eh_entry) {   in scsi_eh_get_sense()
  1425: struct scsi_cmnd *scmd, *next;   in scsi_eh_test_devices() (local)
  1430: scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);   in scsi_eh_test_devices()
  1450: list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)   in scsi_eh_test_devices()
  1503: struct scsi_cmnd *scmd, *stu_scmd, *next;   in scsi_eh_stu() (local)
  1534: list_for_each_entry_safe(scmd, next, ...   in scsi_eh_stu()
  1569: struct scsi_cmnd *scmd, *bdr_scmd, *next;   in scsi_eh_bus_device_reset() (local)
  1637: struct scsi_cmnd *next, *scmd;   in scsi_eh_target_reset() (local)
  1693: struct scsi_cmnd *scmd, *chan_scmd, *next;   in scsi_eh_bus_reset() (local)
  1765: struct scsi_cmnd *scmd, *next;   in scsi_eh_host_reset() (local)
  1803: struct scsi_cmnd *scmd, *next;   in scsi_eh_offline_sdevs() (local)
  2197: struct scsi_cmnd *scmd, *next;   in scsi_eh_flush_done_q() (local)
  [more matches not shown]
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/qib_rc.c
  98:   /* Check for no next entry in the queue. */   in qib_make_rc_ack()
  854:  * Set qp->s_sending_psn to the next PSN after the given one.
  1115: * continue with the next packet the receiver wants.   in do_rc_ack()
  1692: unsigned next;   in qib_update_ack_queue() (local)
  1694: next = n + 1;   in qib_update_ack_queue()
  1695: if (next > QIB_MAX_RDMA_ATOMIC)   in qib_update_ack_queue()
  1696: next = 0;   in qib_update_ack_queue()
  1697: qp->s_tail_ack_queue = next;   in qib_update_ack_queue()
  1942: u8 next;   in qib_rc_rcv() (local)
  1946: next ...   in qib_rc_rcv()
  2014: u8 next;   in qib_rc_rcv() (local)
  [more matches not shown]
/kernel/linux/linux-6.6/tools/power/x86/intel-speed-select/isst-config.c
  2660: char *next;   in parse_cpu_command() (local)
  2662: next = optarg;   in parse_cpu_command()
  2665: while (next && *next) {   in parse_cpu_command()
  2666: if (*next == '-') /* no negative cpu numbers */   in parse_cpu_command()
  2669: start = strtoul(next, &next, 10);   in parse_cpu_command()
  2676: if (*next == '\0')   in parse_cpu_command()
  2679: if (*next == ',') {   in parse_cpu_command()
  2680: next ...   in parse_cpu_command()
  [more matches not shown]
/third_party/python/Modules/_xxsubinterpretersmodule.c
  360:  struct _channelitem *next;   (member)
  372:  item->next = NULL;   in _channelitem_new()
  384:  item->next = NULL;   in _channelitem_clear()
  399:  item = item->next;   in _channelitem_free_all()
  463:  queue->last->next = item;   in _channelqueue_put()
  476:  queue->first = item->next;   in _channelqueue_get()
  490:  struct _channelend *next;   (member)
  503:  end->next = NULL;   in _channelend_new()
  520:  end = end->next;   in _channelend_free_all()
  535:  end = end->next;   in _channelend_find()
  876:  struct _channelref *next;   (member)
  [more matches not shown]
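_channelqueue_put() and _channelqueue_get() are a textbook singly linked FIFO: append through last->next, pop from first. A standalone C sketch of the same queue (the names echo the module, but the code is an illustration, not CPython's):

```c
#include <stdio.h>
#include <stdlib.h>

struct item {
        int value;
        struct item *next;
};

struct queue {
        struct item *first;     /* pop end */
        struct item *last;      /* append end */
};

/* Append at the tail, cf. _channelqueue_put(). */
static void queue_put(struct queue *q, int value)
{
        struct item *item = calloc(1, sizeof(*item));

        item->value = value;
        item->next = NULL;
        if (q->last)
                q->last->next = item;   /* queue->last->next = item */
        else
                q->first = item;
        q->last = item;
}

/* Pop from the head, cf. _channelqueue_get(); returns -1 when empty. */
static int queue_get(struct queue *q, int *value)
{
        struct item *item = q->first;

        if (!item)
                return -1;
        q->first = item->next;          /* queue->first = item->next */
        if (!q->first)
                q->last = NULL;
        *value = item->value;
        free(item);
        return 0;
}

int main(void)
{
        struct queue q = { NULL, NULL };
        int v;

        for (int i = 1; i <= 3; i++)
                queue_put(&q, i);
        while (queue_get(&q, &v) == 0)
                printf("%d\n", v);      /* 1 2 3: FIFO order */
        return 0;
}
```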
/kernel/linux/linux-5.10/arch/powerpc/platforms/powernv/opal.c
  734:  /* HMI exception handler called in virtual mode when irqs are next enabled. */
  1159: struct opal_sg_list *next;   in opal_vmalloc_to_sg_list() (local)
  1161: next = kzalloc(PAGE_SIZE, GFP_KERNEL);   in opal_vmalloc_to_sg_list()
  1162: if (!next)   in opal_vmalloc_to_sg_list()
  1168: sg->next = cpu_to_be64(__pa(next));   in opal_vmalloc_to_sg_list()
  1169: sg = next;   in opal_vmalloc_to_sg_list()
  1189: uint64_t next = be64_to_cpu(sg->next);   in opal_free_sg_list() (local)
  1193: if (next)   in opal_free_sg_list()
  [more matches not shown]
/kernel/linux/linux-6.6/arch/powerpc/platforms/powernv/opal.c
  734:  /* HMI exception handler called in virtual mode when irqs are next enabled. */
  1161: struct opal_sg_list *next;   in opal_vmalloc_to_sg_list() (local)
  1163: next = kzalloc(PAGE_SIZE, GFP_KERNEL);   in opal_vmalloc_to_sg_list()
  1164: if (!next)   in opal_vmalloc_to_sg_list()
  1170: sg->next = cpu_to_be64(__pa(next));   in opal_vmalloc_to_sg_list()
  1171: sg = next;   in opal_vmalloc_to_sg_list()
  1191: uint64_t next = be64_to_cpu(sg->next);   in opal_free_sg_list() (local)
  1195: if (next)   in opal_free_sg_list()
  [more matches not shown]
/kernel/linux/linux-5.10/drivers/media/mc/mc-entity.c
  249:  graph->stack[graph->top].link = entity->links.next;   in stack_push()
  312:  struct media_entity *next;   in media_graph_walk_iter() (local)
  318:  link_top(graph) = link_top(graph)->next;   in media_graph_walk_iter()
  327:  next = media_entity_other(entity, link);   in media_graph_walk_iter()
  330:  if (media_entity_enum_test_and_set(&graph->ent_enum, next)) {   in media_graph_walk_iter()
  331:  link_top(graph) = link_top(graph)->next;   in media_graph_walk_iter()
  334:  next->name);   in media_graph_walk_iter()
  339:  link_top(graph) = link_top(graph)->next;   in media_graph_walk_iter()
  340:  stack_push(graph, next);   in media_graph_walk_iter()
  342:  next ...   in media_graph_walk_iter()
  [more matches not shown]
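media_graph_walk_iter() performs an iterative depth-first walk: an explicit stack of (entity, current link) frames, where stepping means advancing link_top to ->next, and an enum bitmap (media_entity_enum_test_and_set) skips entities already seen. The same shape in plain C over an adjacency matrix (invented graph types; the kernel walks media links between entities):

```c
#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 8

/* adjacency[i][j] != 0 means an edge i -> j */
static const int adjacency[MAX_NODES][MAX_NODES] = {
        [0] = { [1] = 1, [2] = 1 },
        [1] = { [3] = 1 },
        [2] = { [3] = 1 },
};

struct frame {
        int node;
        int link;       /* next neighbour index to try, like link_top() */
};

static void graph_walk(int start)
{
        struct frame stack[MAX_NODES];
        bool visited[MAX_NODES] = { false };
        int top = 0;

        stack[top] = (struct frame){ start, 0 };
        visited[start] = true;

        while (top >= 0) {
                struct frame *f = &stack[top];

                if (f->link >= MAX_NODES) {     /* links exhausted: pop */
                        printf("walk: %d\n", f->node);
                        top--;
                        continue;
                }
                int next = f->link++;           /* advance to ->next */
                if (!adjacency[f->node][next] || visited[next])
                        continue;               /* no edge, or seen it */
                visited[next] = true;           /* test_and_set equivalent */
                stack[++top] = (struct frame){ next, 0 };
        }
}

int main(void)
{
        graph_walk(0);  /* each reachable node reported exactly once */
        return 0;
}
```

Keeping the stack explicit (rather than recursing) bounds stack usage, which matters in kernel context.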
/kernel/linux/linux-5.10/drivers/infiniband/core/rw.c
  80:   reg->inv_wr.next = &reg->reg_wr.wr;   in rdma_rw_inv_key()
  83:   reg->inv_wr.next = NULL;   in rdma_rw_inv_key()
  151:  prev->wr.wr.next = &reg->inv_wr;   in rdma_rw_init_mr_wrs()
  153:  prev->wr.wr.next = &reg->reg_wr.wr;   in rdma_rw_init_mr_wrs()
  156:  reg->reg_wr.wr.next = &reg->wr.wr;   in rdma_rw_init_mr_wrs()
  181:  prev->wr.wr.next = NULL;   in rdma_rw_init_mr_wrs()
  237:  rdma_wr->wr.next = i + 1 < ctx->nr_ops ?   in rdma_rw_init_map_wrs()
  461:  ctx->reg->reg_wr.wr.next = &rdma_wr->wr;   in rdma_rw_ctx_signature_init()
  522:  if (ctx->reg[0].inv_wr.next)   in rdma_rw_ctx_wrs()
  541:  last_wr->next ...   in rdma_rw_ctx_wrs()
  [more matches not shown]
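rw.c assembles chains of InfiniBand work requests purely through the ->next pointer: an optional invalidate WR, then the MR registration WR, then the RDMA WR, with the last one NULL-terminated before posting. A minimal mock of the chain assembly (the struct is a stand-in, not the real struct ib_send_wr, and nothing is actually posted):

```c
#include <stdio.h>
#include <stddef.h>

/* Stand-in for struct ib_send_wr: only the chaining matters here. */
struct send_wr {
        const char *opname;
        struct send_wr *next;
};

/* Chain inv -> reg -> rdma and NULL-terminate, echoing how
 * rdma_rw_init_mr_wrs() strings its WRs together before posting. */
static struct send_wr *build_chain(struct send_wr *inv, struct send_wr *reg,
                                   struct send_wr *rdma)
{
        struct send_wr *first = inv ? inv : reg;

        if (inv)
                inv->next = reg;
        reg->next = rdma;
        rdma->next = NULL;      /* cf. last_wr->next = NULL */
        return first;
}

int main(void)
{
        struct send_wr inv = { "LOCAL_INV" }, reg = { "REG_MR" },
                       rdma = { "RDMA_WRITE" };

        for (struct send_wr *wr = build_chain(&inv, &reg, &rdma);
             wr; wr = wr->next)
                printf("post %s\n", wr->opname);
        return 0;
}
```

Chaining lets the whole sequence go to the hardware in one ib_post_send() call instead of one doorbell per request.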
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/l2t.c
  61:   struct l2t_entry *rover; /* starting point for next allocation */
  287:  for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)   in alloc_l2e()
  289:  *p = e->next;   in alloc_l2e()
  290:  e->next = NULL;   in alloc_l2e()
  329:  for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)   in find_or_alloc_l2e()
  331:  *p = e->next;   in find_or_alloc_l2e()
  332:  e->next = NULL;   in find_or_alloc_l2e()
  394:  * Update an L2T entry that was previously used for the same next hop as neigh.
  440:  for (e = d->l2tab[hash].first; e; e = e->next)   in cxgb4_l2t_get()
  464:  e->next ...   in cxgb4_l2t_get()
  [more matches not shown]
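alloc_l2e() uses the classic two-star unlink: p walks the hash chain as a pointer to the link field itself (the head pointer or some entry's ->next), so removal is a single *p = e->next with no special case for the head. The idiom in isolation, on a toy list rather than the driver's l2t structures:

```c
#include <stdio.h>
#include <stddef.h>

struct entry {
        int key;
        struct entry *next;
};

/* Unlink 'e' from the chain rooted at *first.  Because 'p' points at the
 * link field rather than the node, the head needs no special case: the
 * same  *p = e->next  that alloc_l2e() uses. */
static void unlink_entry(struct entry **first, struct entry *e)
{
        struct entry **p;

        for (p = first; *p; p = &(*p)->next) {
                if (*p == e) {
                        *p = e->next;
                        e->next = NULL;
                        return;
                }
        }
}

int main(void)
{
        struct entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct entry *first = &a;

        unlink_entry(&first, &a);       /* removing the head also works */
        for (struct entry *e = first; e; e = e->next)
                printf("%d\n", e->key); /* prints 2 3 */
        return 0;
}
```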
/kernel/linux/linux-5.10/fs/afs/flock.c
  113:  struct file_lock *p, *_p, *next = NULL;   in afs_next_locker() (local)
  131:  /* Select the next locker to hand off to. */   in afs_next_locker()
  132:  if (next &&   in afs_next_locker()
  133:  (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))   in afs_next_locker()
  135:  next = p;   in afs_next_locker()
  141:  if (next) {   in afs_next_locker()
  143:  next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;   in afs_next_locker()
  144:  trace_afs_flock_op(vnode, next, afs_flock_op_wake);   in afs_next_locker()
  145:  wake_up(&next->fl_wait);   in afs_next_locker()
  165:  p = list_entry(vnode->pending_locks.next, ...   in afs_kill_lockers_enoent()
  [more matches not shown]
/kernel/linux/linux-5.10/fs/dlm/debug_fs.c
  468:  * move to the first rsb in the next non-empty bucket   in table_seq_start()
  504:  struct rb_node *next;   in table_seq_next() (local)
  513:  * move to the next rsb in the same bucket   in table_seq_next()
  518:  next = rb_next(&rp->res_hashnode);   in table_seq_next()
  520:  if (next) {   in table_seq_next()
  521:  r = rb_entry(next, struct dlm_rsb, res_hashnode);   in table_seq_next()
  533:  * move to the first rsb in the next non-empty bucket   in table_seq_next()
  552:  next = rb_first(tree);   in table_seq_next()
  553:  r = rb_entry(next, struct dlm_rsb, res_hashnode);   in table_seq_next()
  577:  .next ...
  [more matches not shown]
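table_seq_next() advances a seq_file cursor in two stages: rb_next() within the current bucket's tree, and when that returns NULL, rb_first() of the next non-empty bucket. An array-based analogue of the two-level advance (buckets of sorted arrays standing in for rb-trees; purely illustrative):

```c
#include <stdio.h>

#define NBUCKETS 4

static const int bucket_data[NBUCKETS][3] = {
        { 10, 11 }, { 0 }, { 30 }, { 40, 41, 42 },
};
static const int bucket_len[NBUCKETS] = { 2, 0, 1, 3 };

struct cursor { int bucket, slot; };

/* Step the cursor: next slot in the same bucket, else first slot of the
 * next non-empty bucket; returns 0 at the end of the table. */
static int table_next(struct cursor *c, int *value)
{
        if (c->slot + 1 < bucket_len[c->bucket]) {
                c->slot++;                      /* rb_next() equivalent */
        } else {
                do {
                        c->bucket++;            /* skip empty buckets */
                } while (c->bucket < NBUCKETS && bucket_len[c->bucket] == 0);
                if (c->bucket >= NBUCKETS)
                        return 0;
                c->slot = 0;                    /* rb_first() equivalent */
        }
        *value = bucket_data[c->bucket][c->slot];
        return 1;
}

int main(void)
{
        struct cursor c = { 0, 0 };
        int v = bucket_data[0][0];

        do
                printf("%d\n", v);              /* 10 11 30 40 41 42 */
        while (table_next(&c, &v));
        return 0;
}
```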
/kernel/linux/linux-5.10/kernel/bpf/devmap.c
  227:  struct hlist_node *next;   in dev_map_free() (local)
  231:  hlist_for_each_entry_safe(dev, next, head, index_hlist) {   in dev_map_free()
  265:  u32 *next = next_key;   in dev_map_get_next_key() (local)
  268:  *next = 0;   in dev_map_get_next_key()
  274:  *next = index + 1;   in dev_map_get_next_key()
  296:  u32 idx, *next = next_key;   in dev_map_hash_get_next_key() (local)
  314:  *next = next_dev->idx;   in dev_map_hash_get_next_key()
  329:  *next = next_dev->idx;   in dev_map_hash_get_next_key()
  792:  struct hlist_node *next;   in dev_map_hash_remove_netdev() (local)
  796:  hlist_for_each_entry_safe(dev, next, hea...   in dev_map_hash_remove_netdev()
  [more matches not shown]
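dev_map_get_next_key() implements the BPF map iteration contract: an invalid or out-of-range key yields the first key, the last valid key yields -ENOENT so userspace knows iteration is done, and anything else yields key + 1. A sketch of the array-map flavour (simplified and self-contained; the hash flavour instead walks hlist buckets):

```c
#include <stdio.h>
#include <errno.h>

#define MAX_ENTRIES 4

/* Mirror of the array-map next-key logic in dev_map_get_next_key():
 * invalid key -> restart at 0; last key -> -ENOENT; else key + 1. */
static int map_get_next_key(unsigned key, int key_valid, unsigned *next_key)
{
        unsigned *next = next_key;

        if (!key_valid || key >= MAX_ENTRIES) {
                *next = 0;              /* start (or restart) iteration */
                return 0;
        }
        if (key == MAX_ENTRIES - 1)
                return -ENOENT;         /* no next key: iteration done */
        *next = key + 1;
        return 0;
}

int main(void)
{
        unsigned key = 0;
        int err = map_get_next_key(0, 0, &key); /* invalid key: get first */

        while (!err) {
                printf("key %u\n", key);        /* 0 1 2 3 */
                err = map_get_next_key(key, 1, &key);
        }
        return 0;
}
```

This is the loop bpf_map_get_next_key() callers run from userspace to enumerate any map.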
/kernel/linux/linux-5.10/fs/seq_file.c
  48:   * element of sequence. @op->stop() shuts it down. @op->next()
  49:   * returns the next element of sequence. @op->show() prints element
  50:   * into the buffer. In case of error ->start() and ->next() return
  120:  p = m->op->next(m, p, &m->index);   in traverse()
  236:  p = m->op->next(m, p, &m->index);   in seq_read_iter()
  262:  p = m->op->next(m, p, &m->index);   in seq_read_iter()
  264:  pr_info_ratelimited("buggy .next function %ps did not update position index\n",   in seq_read_iter()
  265:  m->op->next);   in seq_read_iter()
  268:  if (!p || IS_ERR(p)) // no next record for us   in seq_read_iter()
  569:  op->next ...   in single_open()
  [more matches not shown]
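The hit at line 264 is the enforcement of seq_file's ->next contract: the callback must both return the next record and advance *pos, and seq_read_iter() warns (and bumps the index itself) when a buggy ->next forgets. A minimal iterator trio over a static array, shaped the way the start/next/show callbacks are expected to behave (illustrative userspace code, not the kernel API):

```c
#include <stdio.h>
#include <stddef.h>

static const char *lines[] = { "alpha", "beta", "gamma" };
#define NLINES (sizeof(lines) / sizeof(lines[0]))

/* ->start: return the record at *pos, or NULL past the end. */
static const void *demo_start(long long *pos)
{
        return *pos < (long long)NLINES ? &lines[*pos] : NULL;
}

/* ->next: return the next record AND advance *pos.  Forgetting the
 * increment is exactly the bug seq_read_iter() warns about. */
static const void *demo_next(const void *v, long long *pos)
{
        (void)v;
        ++*pos;
        return *pos < (long long)NLINES ? &lines[*pos] : NULL;
}

/* ->show: emit one record into the output. */
static int demo_show(const void *v)
{
        printf("%s\n", *(const char * const *)v);
        return 0;
}

int main(void)
{
        long long pos = 0;

        for (const void *p = demo_start(&pos); p; p = demo_next(p, &pos))
                demo_show(p);
        return 0;
}
```

Keeping position in *pos, not in the iterator's private state, is what lets seq_file resume cleanly after a partial read.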
/kernel/linux/linux-5.10/drivers/staging/android/ashmem.c
  319:  struct ashmem_range *range, *next;   in ashmem_release() (local)
  322:  list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)   in ashmem_release()
  678:  struct ashmem_range *range, *next;   in ashmem_pin() (local)
  687:  list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {   in ashmem_pin()
  752:  struct ashmem_range *range, *next;   in ashmem_unpin() (local)
  766:  list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {   in ashmem_unpin()
  897:  struct ashmem_range *range, *next;   in ashmem_shrink_by_id() (local)
  903:  list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {   in ashmem_shrink_by_id()
  941:  struct ashmem_range *range, *next;   in is_ashmem_unpin() (local)
  949:  list_for_each_entry_safe(range, next, ...   in is_ashmem_unpin()
  [more matches not shown]
/kernel/linux/linux-6.6/fs/afs/flock.c
  113:  struct file_lock *p, *_p, *next = NULL;   in afs_next_locker() (local)
  131:  /* Select the next locker to hand off to. */   in afs_next_locker()
  132:  if (next &&   in afs_next_locker()
  133:  (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))   in afs_next_locker()
  135:  next = p;   in afs_next_locker()
  141:  if (next) {   in afs_next_locker()
  143:  next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;   in afs_next_locker()
  144:  trace_afs_flock_op(vnode, next, afs_flock_op_wake);   in afs_next_locker()
  145:  wake_up(&next->fl_wait);   in afs_next_locker()
  165:  p = list_entry(vnode->pending_locks.next, ...   in afs_kill_lockers_enoent()
  [more matches not shown]
/kernel/linux/linux-6.6/fs/nfs/direct.c
  278:  struct nfs_page *req = nfs_list_entry(hdr->pages.next);   in nfs_direct_read_completion()
  299:  req = nfs_list_entry(head->next);   in nfs_read_sync_pgio_error()
  561:  req = nfs_list_entry(reqs.next);   in nfs_direct_write_reschedule()
  583:  req = nfs_list_entry(reqs.next);   in nfs_direct_write_reschedule()
  621:  req = nfs_list_entry(data->pages.next);   in nfs_direct_commit_complete()
  694:  req = nfs_list_entry(reqs.next);   in nfs_direct_write_clear_reqs()
  732:  struct nfs_page *req = nfs_list_entry(hdr->pages.next);   in nfs_direct_write_completion()
  756:  req = nfs_list_entry(hdr->pages.next);   in nfs_direct_write_completion()
  782:  req = nfs_list_entry(head->next);   in nfs_write_sync_pgio_error()
  803:  req = nfs_list_entry(hdr->pages.next);   in nfs_direct_write_reschedule_io()
  [more matches not shown]
/kernel/linux/linux-6.6/fs/btrfs/delayed-ref.c
  369:  * is given, the next bigger entry is returned if no exact match is found.
  443:  struct btrfs_delayed_ref_node *next;   in merge_ref() (local)
  450:  next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);   in merge_ref()
  452:  if (seq && next->seq >= seq)   in merge_ref()
  454:  if (comp_refs(ref, next, false))   in merge_ref()
  457:  if (ref->action == next->action) {   in merge_ref()
  458:  mod = next->ref_mod;   in merge_ref()
  460:  if (ref->ref_mod < next->ref_mod) {   in merge_ref()
  461:  swap(ref, next);   in merge_ref()
  464:  mod = -next ...   in merge_ref()
  [more matches not shown]
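merge_ref() folds adjacent delayed refs for the same extent: equal actions simply add their ref_mod counts, while an add/drop pair cancels, with swap() ensuring the survivor is the one with the larger count. A simplified model of just the mod arithmetic (invented types, two actions only; the real function also checks sequence numbers and comp_refs() ordering):

```c
#include <stdio.h>
#include <stddef.h>

enum action { REF_ADD, REF_DROP };

struct ref {
        enum action action;
        int ref_mod;    /* how many times this add/drop applies */
};

static void swap_refs(struct ref **a, struct ref **b)
{
        struct ref *t = *a; *a = *b; *b = t;
}

/* Fold 'next' into 'ref', echoing merge_ref(): same action -> counts add;
 * opposite actions -> the smaller count cancels out of the larger one.
 * Returns the surviving ref, or NULL when they cancel exactly. */
static struct ref *merge(struct ref *ref, struct ref *next)
{
        int mod;

        if (ref->action == next->action) {
                mod = next->ref_mod;
        } else {
                if (ref->ref_mod < next->ref_mod)
                        swap_refs(&ref, &next);  /* keep the bigger one */
                mod = -next->ref_mod;
        }
        ref->ref_mod += mod;
        return ref->ref_mod ? ref : NULL;
}

int main(void)
{
        struct ref add = { REF_ADD, 3 }, drop = { REF_DROP, 2 };
        struct ref *r = merge(&add, &drop);

        if (r)
                printf("%s x%d\n", r->action == REF_ADD ? "add" : "drop",
                       r->ref_mod);              /* add x1 */
        else
                printf("cancelled\n");
        return 0;
}
```

Merging before the refs are run keeps the delayed-ref trees small and avoids executing add/drop pairs that would only undo each other.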
/kernel/linux/linux-6.6/fs/seq_file.c
  48:   * element of sequence. @op->stop() shuts it down. @op->next()
  49:   * returns the next element of sequence. @op->show() prints element
  50:   * into the buffer. In case of error ->start() and ->next() return
  120:  p = m->op->next(m, p, &m->index);   in traverse()
  236:  p = m->op->next(m, p, &m->index);   in seq_read_iter()
  262:  p = m->op->next(m, p, &m->index);   in seq_read_iter()
  264:  pr_info_ratelimited("buggy .next function %ps did not update position index\n",   in seq_read_iter()
  265:  m->op->next);   in seq_read_iter()
  268:  if (!p || IS_ERR(p)) // no next record for us   in seq_read_iter()
  580:  op->next ...   in single_open()
  [more matches not shown]
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4/l2t.c
  61:   struct l2t_entry *rover; /* starting point for next allocation */
  287:  for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)   in alloc_l2e()
  289:  *p = e->next;   in alloc_l2e()
  290:  e->next = NULL;   in alloc_l2e()
  329:  for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)   in find_or_alloc_l2e()
  331:  *p = e->next;   in find_or_alloc_l2e()
  332:  e->next = NULL;   in find_or_alloc_l2e()
  394:  * Update an L2T entry that was previously used for the same next hop as neigh.
  440:  for (e = d->l2tab[hash].first; e; e = e->next)   in cxgb4_l2t_get()
  464:  e->next ...   in cxgb4_l2t_get()
  [more matches not shown]