/kernel/linux/linux-6.6/drivers/platform/surface/aggregator/
ssh_packet_layer.c
  699  struct list_head *head;  in __ssh_ptl_queue_find_entrypoint() local
  718  list_for_each(head, &p->ptl->queue.head) {  in __ssh_ptl_queue_find_entrypoint()
  719  q = list_entry(head, struct ssh_packet, queue_node);  in __ssh_ptl_queue_find_entrypoint()
  725  list_for_each_prev(head, &p->ptl->queue.head) {  in __ssh_ptl_queue_find_entrypoint()
  726  q = list_entry(head, struct ssh_packet, queue_node);  in __ssh_ptl_queue_find_entrypoint()
  729  head = head->next;  in __ssh_ptl_queue_find_entrypoint()
  735  return head;  in __ssh_ptl_queue_find_entrypoint()
  742  struct list_head *head;  __ssh_ptl_queue_push() local
  [all...]
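The hits above show the usual pattern for finding an insertion point in a sorted list_head queue with list_for_each_prev()/list_entry(). A minimal kernel-style sketch of that pattern follows; struct item, its priority field, and find_entrypoint() are illustrative names, not the driver's own types.

#include <linux/list.h>

struct item {
        int priority;
        struct list_head node;          /* links the item into the queue */
};

/*
 * Return the list_head after which a new item of the given priority should
 * be linked, keeping the queue sorted by ascending priority.
 */
static struct list_head *find_entrypoint(struct list_head *queue, int priority)
{
        struct list_head *head;
        struct item *q;

        /* Walk backwards: the last entry not above our priority is the anchor. */
        list_for_each_prev(head, queue) {
                q = list_entry(head, struct item, node);
                if (q->priority <= priority)
                        return head;
        }

        return queue;   /* nothing smaller found: insert at the front */
}

A caller would then link the new element with list_add(&new->node, find_entrypoint(queue, new->priority)).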
/kernel/linux/linux-6.6/tools/perf/arch/x86/util/ |
intel-pt.c
  1044  void *data, size_t head)  in intel_pt_compare_ref()
  1049  if (head > ref_offset || head < ref_end - buf_size)  in intel_pt_compare_ref()
  1051  } else if (head > ref_offset && head < ref_end) {  in intel_pt_compare_ref()
  1060  void *data, size_t head)  in intel_pt_copy_ref()
  1062  if (head >= ref_size) {  in intel_pt_copy_ref()
  1063  memcpy(ref_buf, data + head - ref_size, ref_size);  in intel_pt_copy_ref()
  1065  memcpy(ref_buf, data, head);  in intel_pt_copy_ref()
  1066  ref_size -= head;  in intel_pt_copy_ref()
  1042  intel_pt_compare_ref(void *ref_buf, size_t ref_offset, size_t ref_size, size_t buf_size, void *data, size_t head)  intel_pt_compare_ref() argument
  1059  intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size, void *data, size_t head)  intel_pt_copy_ref() argument
  1071  intel_pt_wrapped(struct intel_pt_recording *ptr, int idx, struct auxtrace_mmap *mm, unsigned char *data, u64 head)  intel_pt_wrapped() argument
  1105  intel_pt_find_snapshot(struct auxtrace_record *itr, int idx, struct auxtrace_mmap *mm, unsigned char *data, u64 *head, u64 *old)  intel_pt_find_snapshot() argument
  [all...]
/kernel/linux/linux-5.10/net/batman-adv/ |
bridge_loop_avoidance.c
  223  struct hlist_head *head;  in batadv_claim_hash_find() local
  232  head = &hash->table[index];  in batadv_claim_hash_find()
  235  hlist_for_each_entry_rcu(claim, head, hash_entry) {  in batadv_claim_hash_find()
  263  struct hlist_head *head;  in batadv_backbone_hash_find() local
  275  head = &hash->table[index];  in batadv_backbone_hash_find()
  278  hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {  in batadv_backbone_hash_find()
  303  struct hlist_head *head;  in batadv_bla_del_backbone_claims() local
  313  head = &hash->table[i];  in batadv_bla_del_backbone_claims()
  318  head, hash_entry) {  in batadv_bla_del_backbone_claims()
  599  struct hlist_head *head;  in batadv_bla_answer_request() local
  1228  struct hlist_head *head;  batadv_bla_purge_backbone_gw() local
  1283  struct hlist_head *head;  batadv_bla_purge_claims() local
  1338  struct hlist_head *head;  batadv_bla_update_orig_address() local
  1435  struct hlist_head *head;  batadv_bla_periodic_work() local
  1742  struct hlist_head *head;  batadv_bla_is_backbone_gw_orig() local
  2144  struct hlist_head *head;  batadv_bla_claim_table_seq_print_text() local
  2377  struct hlist_head *head;  batadv_bla_backbone_table_seq_print_text() local
  [all...]
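These hits all follow the same open-chained hash lookup under RCU: pick a bucket, then hlist_for_each_entry_rcu() over it. A minimal sketch under that assumption; struct claim, its key field, and the jhash-based bucket choice are illustrative, not batman-adv's real types or hash function.

#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/jhash.h>

struct claim {
        u32 key;
        struct hlist_node hash_entry;   /* links the claim into one bucket */
};

/*
 * Look up a claim in an open-chained hash table under RCU.  `table` is an
 * array of n_buckets hlist_heads and callers hold rcu_read_lock().
 */
static struct claim *claim_hash_find(struct hlist_head *table,
                                     unsigned int n_buckets, u32 key)
{
        struct hlist_head *head = &table[jhash_1word(key, 0) % n_buckets];
        struct claim *claim;

        hlist_for_each_entry_rcu(claim, head, hash_entry) {
                if (claim->key == key)
                        return claim;
        }

        return NULL;
}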
/kernel/linux/linux-5.10/drivers/acpi/ |
utils.c
  50  u8 *head = NULL;  in acpi_extract_package() local
  176  head = buffer->pointer;  in acpi_extract_package()
  192  *((u64 *) head) =  in acpi_extract_package()
  194  head += sizeof(u64);  in acpi_extract_package()
  197  pointer = (u8 **) head;  in acpi_extract_package()
  201  head += sizeof(u64 *);  in acpi_extract_package()
  217  pointer = (u8 **) head;  in acpi_extract_package()
  221  head += sizeof(char *);  in acpi_extract_package()
  228  pointer = (u8 **) head;  in acpi_extract_package()
  232  head  in acpi_extract_package()
  [all...]
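acpi_extract_package() walks an output buffer with a moving head write cursor, storing integer elements directly and reserving pointer slots for strings and buffers. A small illustrative sketch of that cursor pattern; the field order is made up, and like the ACPI helper it assumes the buffer start is suitably aligned for the stores below.

#include <linux/types.h>

/* Append one integer element and one string-pointer slot to a caller
 * buffer, advancing the `head` write cursor; returns the new end. */
static u8 *pack_one(u8 *head, u64 value, char *str)
{
        char **pointer;

        *((u64 *) head) = value;        /* store the integer element */
        head += sizeof(u64);

        pointer = (char **) head;       /* slot that will point at the string */
        *pointer = str;
        head += sizeof(char *);

        return head;                    /* new end of the packed data */
}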
/kernel/linux/linux-5.10/sound/soc/sof/ |
loader.c
  202  const struct sof_ext_man_header *head;  in snd_sof_ext_man_size() local
  204  head = (struct sof_ext_man_header *)fw->data;  in snd_sof_ext_man_size()
  208  * it prevents from reading unallocated memory from `head` in following  in snd_sof_ext_man_size()
  211  if (fw->size < sizeof(*head))  in snd_sof_ext_man_size()
  218  if (head->magic == SOF_EXT_MAN_MAGIC_NUMBER)  in snd_sof_ext_man_size()
  219  return head->full_size;  in snd_sof_ext_man_size()
  230  const struct sof_ext_man_header *head;  in snd_sof_fw_ext_man_parse() local
  236  head = (struct sof_ext_man_header *)fw->data;  in snd_sof_fw_ext_man_parse()
  237  remaining = head->full_size - head  in snd_sof_fw_ext_man_parse()
  [all...]
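The point of the size check at line 211 is to never dereference the header before verifying the firmware image is at least sizeof(*head) bytes. A hedged sketch of that shape; the struct layout and the magic value are placeholders for the real sof_ext_man_header and SOF_EXT_MAN_MAGIC_NUMBER.

#include <linux/firmware.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholder layout; the driver's header carries more fields. */
struct ext_man_header {
        u32 magic;
        u32 full_size;          /* size of the whole extended manifest */
};

#define EXT_MAN_MAGIC   0x6e616d25      /* illustrative value */

/*
 * Return the extended-manifest size, 0 if there is none, or -EINVAL if
 * the image is too small to even contain the header.
 */
static ssize_t ext_man_size(const struct firmware *fw)
{
        const struct ext_man_header *head = (const void *)fw->data;

        /* Never dereference `head` before checking the blob is big enough. */
        if (fw->size < sizeof(*head))
                return -EINVAL;

        if (head->magic == EXT_MAN_MAGIC)
                return head->full_size;

        return 0;
}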
/kernel/linux/linux-5.10/fs/nfs/flexfilelayout/ |
flexfilelayoutdev.c
  227  struct list_head *head = &flo->error_list;  in ff_layout_add_ds_error_locked() local
  237  head = &err->list;  in ff_layout_add_ds_error_locked()
  247  list_add_tail(&dserr->list, head);  in ff_layout_add_ds_error_locked()
  459  void ff_layout_free_ds_ioerr(struct list_head *head)  in ff_layout_free_ds_ioerr() argument
  463  while (!list_empty(head)) {  in ff_layout_free_ds_ioerr()
  464  err = list_first_entry(head,  in ff_layout_free_ds_ioerr()
  473  int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)  in ff_layout_encode_ds_ioerr() argument
  478  list_for_each_entry(err, head, list) {  in ff_layout_encode_ds_ioerr()
  508  struct list_head *head,  in do_layout_fetch_ds_ioerr()
  525  list_move(&err->list, head);  in do_layout_fetch_ds_ioerr()
  506  do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, const struct pnfs_layout_range *range, struct list_head *head, unsigned int maxnum)  do_layout_fetch_ds_ioerr() argument
  533  ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, const struct pnfs_layout_range *range, struct list_head *head, unsigned int maxnum)  ff_layout_fetch_ds_ioerr() argument
  [all...]
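ff_layout_free_ds_ioerr() is the standard drain-and-free loop over a list_head. A minimal sketch of that loop with an illustrative entry type (the real entries are NFS DS error records):

#include <linux/list.h>
#include <linux/slab.h>

struct ds_err {
        int status;                     /* illustrative payload */
        struct list_head list;
};

/* Drain and free every entry queued on `head`, the way
 * ff_layout_free_ds_ioerr() releases its error records. */
static void free_err_list(struct list_head *head)
{
        struct ds_err *err;

        while (!list_empty(head)) {
                err = list_first_entry(head, struct ds_err, list);
                list_del(&err->list);
                kfree(err);
        }
}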
/kernel/linux/linux-5.10/net/ipv6/ila/ |
ila_xlat.c
  207  struct ila_map *ila, *head;  in ila_add_mapping() local
  235  head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,  in ila_add_mapping()
  238  if (!head) {  in ila_add_mapping()
  243  struct ila_map *tila = head, *prev = NULL;  in ila_add_mapping()
  260  /* Insert in sub list of head */  in ila_add_mapping()
  264  /* Make this ila new head */  in ila_add_mapping()
  265  RCU_INIT_POINTER(ila->next, head);  in ila_add_mapping()
  267  &head->node,  in ila_add_mapping()
  286  struct ila_map *ila, *head, *prev;  in ila_del_mapping() local
  292  head  in ila_del_mapping()
  [all...]
/kernel/linux/linux-5.10/net/netfilter/ |
core.c
  69  struct nf_hook_entries_rcu_head *head;  in __nf_hook_entries_free() local
  71  head = container_of(h, struct nf_hook_entries_rcu_head, head);  in __nf_hook_entries_free()
  72  kvfree(head->allocation);  in __nf_hook_entries_free()
  77  struct nf_hook_entries_rcu_head *head;  in nf_hook_entries_free() local
  86  head = (void *)&ops[num];  in nf_hook_entries_free()
  87  head->allocation = e;  in nf_hook_entries_free()
  88  call_rcu(&head->head, __nf_hook_entries_free);  in nf_hook_entries_free()
  611  void nf_hook_slow_list(struct list_head *head, struc  argument
  [all...]
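The nf_hook_entries code frees its hook array after an RCU grace period by embedding a small bookkeeping struct (an rcu_head plus the pointer to free) and recovering it with container_of() in the callback. A generic sketch of that call_rcu() pattern; struct deferred_free is an illustrative stand-in for nf_hook_entries_rcu_head.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/mm.h>

struct deferred_free {
        void *allocation;               /* what we actually want to kvfree() */
        struct rcu_head head;           /* handed to call_rcu() */
};

/* RCU callback: recover the bookkeeping struct from its rcu_head and
 * release the original allocation, as __nf_hook_entries_free() does. */
static void do_deferred_free(struct rcu_head *h)
{
        struct deferred_free *d = container_of(h, struct deferred_free, head);

        kvfree(d->allocation);
}

/* Queue `e` for freeing after the current RCU grace period.  In the
 * netfilter code, `d` lives in spare room at the tail of `e` itself. */
static void queue_deferred_free(struct deferred_free *d, void *e)
{
        d->allocation = e;
        call_rcu(&d->head, do_deferred_free);
}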
/kernel/linux/linux-5.10/fs/gfs2/ |
aops.c
  43  struct buffer_head *head = page_buffers(page);  in gfs2_page_add_databufs() local
  44  unsigned int bsize = head->b_size;  in gfs2_page_add_databufs()
  49  for (bh = head, start = 0; bh != head || !start;  in gfs2_page_add_databufs()
  62  * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
  65  * @bh_result: The buffer head to return the result in
  686  struct buffer_head *bh, *head;  in gfs2_invalidatepage() local
  695  bh = head = page_buffers(page);  in gfs2_invalidatepage()
  704  } while (bh != head);  in gfs2_invalidatepage()
  725  struct buffer_head *bh, *head;  in gfs2_releasepage() local
  [all...]
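gfs2_invalidatepage() and gfs2_releasepage() use the classic circular walk over a page's buffer_heads: start at page_buffers(page) and follow b_this_page until it wraps back to head. A minimal sketch of that walk (the per-buffer action is a placeholder):

#include <linux/buffer_head.h>

/* Visit every buffer_head attached to a page. */
static void for_each_page_buffer(struct page *page)
{
        struct buffer_head *bh, *head;

        if (!page_has_buffers(page))
                return;

        bh = head = page_buffers(page);
        do {
                /* act on `bh` here, e.g. check buffer_dirty(bh) */
                bh = bh->b_this_page;
        } while (bh != head);   /* the buffer ring closes back on head */
}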
/kernel/linux/linux-6.6/drivers/acpi/ |
utils.c
  40  u8 *head = NULL;  in acpi_extract_package() local
  157  head = buffer->pointer;  in acpi_extract_package()
  173  *((u64 *) head) =  in acpi_extract_package()
  175  head += sizeof(u64);  in acpi_extract_package()
  178  pointer = (u8 **) head;  in acpi_extract_package()
  182  head += sizeof(u64 *);  in acpi_extract_package()
  198  pointer = (u8 **) head;  in acpi_extract_package()
  202  head += sizeof(char *);  in acpi_extract_package()
  209  pointer = (u8 **) head;  in acpi_extract_package()
  213  head  in acpi_extract_package()
  [all...]
/kernel/linux/linux-6.6/fs/nfs/flexfilelayout/ |
flexfilelayoutdev.c
  229  struct list_head *head = &flo->error_list;  in ff_layout_add_ds_error_locked() local
  239  head = &err->list;  in ff_layout_add_ds_error_locked()
  249  list_add_tail(&dserr->list, head);  in ff_layout_add_ds_error_locked()
  461  void ff_layout_free_ds_ioerr(struct list_head *head)  in ff_layout_free_ds_ioerr() argument
  465  while (!list_empty(head)) {  in ff_layout_free_ds_ioerr()
  466  err = list_first_entry(head,  in ff_layout_free_ds_ioerr()
  475  int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)  in ff_layout_encode_ds_ioerr() argument
  480  list_for_each_entry(err, head, list) {  in ff_layout_encode_ds_ioerr()
  510  struct list_head *head,  in do_layout_fetch_ds_ioerr()
  527  list_move(&err->list, head);  in do_layout_fetch_ds_ioerr()
  508  do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, const struct pnfs_layout_range *range, struct list_head *head, unsigned int maxnum)  do_layout_fetch_ds_ioerr() argument
  535  ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, const struct pnfs_layout_range *range, struct list_head *head, unsigned int maxnum)  ff_layout_fetch_ds_ioerr() argument
  [all...]
/kernel/linux/linux-6.6/drivers/net/wireless/mediatek/mt76/ |
testmode.c
  107  struct sk_buff **frag_tail, *head;  in mt76_testmode_alloc_skb() local
  125  head = alloc_skb(head_len, GFP_KERNEL);  in mt76_testmode_alloc_skb()
  126  if (!head)  in mt76_testmode_alloc_skb()
  129  hdr = __skb_put_zero(head, sizeof(*hdr));  in mt76_testmode_alloc_skb()
  134  skb_set_queue_mapping(head, IEEE80211_AC_BE);  in mt76_testmode_alloc_skb()
  135  get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),  in mt76_testmode_alloc_skb()
  138  info = IEEE80211_SKB_CB(head);  in mt76_testmode_alloc_skb()
  144  frag_tail = &skb_shinfo(head)->frag_list;  in mt76_testmode_alloc_skb()
  158  dev_kfree_skb(head);  in mt76_testmode_alloc_skb()
  163  head  in mt76_testmode_alloc_skb()
  [all...]
/kernel/linux/linux-6.6/net/netfilter/ |
core.c
  69  struct nf_hook_entries_rcu_head *head;  in __nf_hook_entries_free() local
  71  head = container_of(h, struct nf_hook_entries_rcu_head, head);  in __nf_hook_entries_free()
  72  kvfree(head->allocation);  in __nf_hook_entries_free()
  77  struct nf_hook_entries_rcu_head *head;  in nf_hook_entries_free() local
  86  head = (void *)&ops[num];  in nf_hook_entries_free()
  87  head->allocation = e;  in nf_hook_entries_free()
  88  call_rcu(&head->head, __nf_hook_entries_free);  in nf_hook_entries_free()
  654  void nf_hook_slow_list(struct list_head *head, struc  argument
  [all...]
/kernel/linux/linux-6.6/net/ipv6/ila/ |
ila_xlat.c
  206  struct ila_map *ila, *head;  in ila_add_mapping() local
  234  head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,  in ila_add_mapping()
  237  if (!head) {  in ila_add_mapping()
  242  struct ila_map *tila = head, *prev = NULL;  in ila_add_mapping()
  259  /* Insert in sub list of head */  in ila_add_mapping()
  263  /* Make this ila new head */  in ila_add_mapping()
  264  RCU_INIT_POINTER(ila->next, head);  in ila_add_mapping()
  266  &head->node,  in ila_add_mapping()
  285  struct ila_map *ila, *head, *prev;  in ila_del_mapping() local
  291  head  in ila_del_mapping()
  [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/ |
i915_perf.c
  214  #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
  471  * could result in an OA buffer reset which might reset the head and  in oa_buffer_check_unlocked()
  493  u32 head, tail, aged_tail;  in oa_buffer_check_unlocked() local
  495  /* NB: The head we observe here might effectively be a little  in oa_buffer_check_unlocked()
  496  * out of date. If a read() is in progress, the head could be  in oa_buffer_check_unlocked()
  497  * anywhere between this head and stream->oa_buffer.tail.  in oa_buffer_check_unlocked()
  499  head = stream->oa_buffer.head - gtt_offset;  in oa_buffer_check_unlocked()
  527  DRM_NOTE("unlanded report(s) head  in oa_buffer_check_unlocked()
  656  u32 head, tail;  gen8_append_oa_reports() local
  953  u32 head, tail;  gen7_append_oa_reports() local
  [all...]
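OA_TAKEN() is plain power-of-two ring arithmetic: the byte count between head and tail, relying on unsigned wraparound plus a mask. A self-contained sketch with an illustrative buffer size (i915 sizes the OA buffer elsewhere):

#include <linux/types.h>

#define OA_BUF_SIZE     (16 * 1024 * 1024)      /* illustrative power-of-two size */

/* Bytes available between head and tail in a power-of-two ring. */
static inline u32 oa_taken(u32 tail, u32 head)
{
        return (tail - head) & (OA_BUF_SIZE - 1);
}

Unsigned subtraction plus the mask gives the right answer even after tail wraps past head, which is why the buffer size must be a power of two.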
/kernel/linux/linux-5.10/drivers/block/aoe/ |
aoecmd.c
  55  struct list_head head;  member
  81  struct list_head *head, *pos, *nx;  in getframe_deferred() local
  84  head = &d->rexmitq;  in getframe_deferred()
  85  list_for_each_safe(pos, nx, head) {  in getframe_deferred()
  86  f = list_entry(pos, struct frame, head);  in getframe_deferred()
  99  struct list_head *head, *pos, *nx;  in getframe() local
  103  head = &d->factive[n];  in getframe()
  104  list_for_each_safe(pos, nx, head) {  in getframe()
  105  f = list_entry(pos, struct frame, head);  in getframe()
  202  list_add(&f->head,  in aoe_freetframe()
  643  struct list_head *pos, *nx, *head;  rexmit_deferred() local
  733  struct list_head *head, *pos, *nx;  rexmit_timer() local
  [all...]
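getframe() and getframe_deferred() scan a frame list with list_for_each_safe() so the matching entry can be unlinked mid-walk. A minimal sketch of that pattern; the tag field and take_frame() name are illustrative.

#include <linux/list.h>
#include <linux/types.h>

struct frame {
        u32 tag;                        /* illustrative lookup key */
        struct list_head head;          /* links the frame into a device list */
};

/* Find a frame by tag and unlink it; the _safe iterator keeps the walk
 * valid even though the entry is removed mid-loop. */
static struct frame *take_frame(struct list_head *list, u32 tag)
{
        struct list_head *pos, *nx;
        struct frame *f;

        list_for_each_safe(pos, nx, list) {
                f = list_entry(pos, struct frame, head);
                if (f->tag == tag) {
                        list_del(pos);  /* safe: nx was saved before unlinking */
                        return f;
                }
        }

        return NULL;
}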
/kernel/linux/linux-5.10/kernel/futex/ |
core.c
  562  * Mapping checks require the head page for any compound page so the  in get_futex_key()
  563  * head page and mapping is looked up now. For anonymous pages, it  in get_futex_key()
  853  struct list_head *next, *head = &curr->pi_state_list;  in exit_pi_state_list() local
  866  while (!list_empty(head)) {  in exit_pi_state_list()
  867  next = head->next;  in exit_pi_state_list()
  897  if (head->next != next) {  in exit_pi_state_list()
  3341  * sys_set_robust_list() - Set the robust-futex list head of a task
  3342  * @head: pointer to the list-head
  3343  * @len: length of the list-head, a
  3371  struct robust_list_head __user *head;  SYSCALL_DEFINE3() local
  3529  fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, unsigned int *pi)  fetch_robust_entry() argument
  3552  struct robust_list_head __user *head = curr->robust_list;  exit_robust_list() local
  3824  compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, compat_uptr_t __user *head, unsigned int *pi)  compat_fetch_robust_entry() argument
  3853  struct compat_robust_list_head __user *head = curr->compat_robust_list;  compat_exit_robust_list() local
  3941  struct compat_robust_list_head __user *head;  COMPAT_SYSCALL_DEFINE3() local
  [all...]
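exit_robust_list() walks a user-space robust-futex list whose entries encode a PI flag in bit 0 of each pointer; fetch_robust_entry() copies the word in from user space and splits it. A tiny user-space sketch of just that decode step:

#include <stdint.h>

/* Split a raw robust-list word into the entry pointer and the PI flag
 * stored in its low bit, mirroring fetch_robust_entry(). */
static void decode_robust_entry(uintptr_t raw, void **entry, unsigned int *pi)
{
        *entry = (void *)(raw & ~(uintptr_t)1);
        *pi = raw & 1;
}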
/kernel/linux/linux-6.6/drivers/block/aoe/ |
aoecmd.c
  54  struct list_head head;  member
  80  struct list_head *head, *pos, *nx;  in getframe_deferred() local
  83  head = &d->rexmitq;  in getframe_deferred()
  84  list_for_each_safe(pos, nx, head) {  in getframe_deferred()
  85  f = list_entry(pos, struct frame, head);  in getframe_deferred()
  98  struct list_head *head, *pos, *nx;  in getframe() local
  102  head = &d->factive[n];  in getframe()
  103  list_for_each_safe(pos, nx, head) {  in getframe()
  104  f = list_entry(pos, struct frame, head);  in getframe()
  201  list_add(&f->head,  in aoe_freetframe()
  642  struct list_head *pos, *nx, *head;  rexmit_deferred() local
  732  struct list_head *head, *pos, *nx;  rexmit_timer() local
  [all...]
/kernel/linux/linux-5.10/drivers/tty/serial/jsm/ |
jsm_neo.c
  282  u16 head;  in neo_copy_data_from_uart_to_queue() local
  285  /* cache head and tail of queue */  in neo_copy_data_from_uart_to_queue()
  286  head = ch->ch_r_head & RQUEUEMASK;  in neo_copy_data_from_uart_to_queue()
  294  if ((qleft = tail - head - 1) < 0)  in neo_copy_data_from_uart_to_queue()
  344  n = min(((u32) total), (RQUEUESIZE - (u32) head));  in neo_copy_data_from_uart_to_queue()
  365  memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);  in neo_copy_data_from_uart_to_queue()
  371  memset(ch->ch_equeue + head, 0, n);  in neo_copy_data_from_uart_to_queue()
  373  /* Add to and flip head if needed */  in neo_copy_data_from_uart_to_queue()
  374  head = (head  in neo_copy_data_from_uart_to_queue()
  476  u16 head;  neo_copy_data_from_queue_to_uart() local
  [all...]
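neo_copy_data_from_uart_to_queue() fills a masked head/tail receive ring: compute the free space, clamp each copy at the wrap point, then advance head with the mask. A simplified sketch of that loop, assuming a power-of-two queue and ignoring the driver's error bookkeeping and flow control:

#include <stdint.h>
#include <string.h>

#define RQUEUE_SIZE     8192u                   /* illustrative power-of-two size */
#define RQUEUE_MASK     (RQUEUE_SIZE - 1)

/*
 * Copy `total` bytes from `src` into a circular receive queue, splitting
 * each copy at the wrap point and advancing the masked head index.
 * Returns the new head.
 */
static uint16_t queue_push(uint8_t *queue, uint16_t head, uint16_t tail,
                           const uint8_t *src, uint32_t total)
{
        while (total) {
                /* free slots in a head/tail ring: one slot always stays empty */
                int32_t qleft = (int32_t)tail - head - 1;
                uint32_t n;

                if (qleft < 0)
                        qleft += RQUEUE_SIZE;

                n = total;
                if (n > (uint32_t)qleft)
                        n = qleft;
                if (n > RQUEUE_SIZE - head)     /* don't copy past the wrap */
                        n = RQUEUE_SIZE - head;
                if (!n)
                        break;                  /* queue is full */

                memcpy(queue + head, src, n);
                src += n;
                total -= n;
                head = (head + n) & RQUEUE_MASK; /* flip head at the wrap */
        }

        return head;
}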
/kernel/linux/linux-5.10/drivers/vhost/ |
vringh.c
  43  u16 avail_idx, i, head;  in __vringh_get_head() local
  61  err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);  in __vringh_get_head()
  63  vringh_bad("Failed to read head: idx %d address %p",  in __vringh_get_head()
  68  if (head >= vrh->vring.num) {  in __vringh_get_head()
  70  head, vrh->vring.num);  in __vringh_get_head()
  75  return head;  in __vringh_get_head()
  661  * @head: head index we received, for passing to vringh_complete_user().
  667  * *head will be vrh->vring.num. You may be able to ignore an invalid
  677  u16 *head)  in vringh_getdesc_user()
  672  vringh_getdesc_user(struct vringh *vrh, struct vringh_iov *riov, struct vringh_iov *wiov, bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r), u16 *head)  vringh_getdesc_user() argument
  777  vringh_complete_user(struct vringh *vrh, u16 head, u32 len)  vringh_complete_user() argument
  942  vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp)  vringh_getdesc_kern() argument
  1022  vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)  vringh_complete_kern() argument
  1294  vringh_getdesc_iotlb(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp)  vringh_getdesc_iotlb() argument
  1380  vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)  vringh_complete_iotlb() argument
  [all...]
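__vringh_get_head() reads the next descriptor head out of the avail ring and rejects anything >= vring.num. A user-space sketch of that fetch-and-validate step, assuming (as virtio does) a power-of-two ring size:

#include <stdint.h>
#include <stdio.h>

/*
 * Pull the next available descriptor head out of a virtio-style avail
 * ring and range-check it.  `avail_ring` and `num` stand in for
 * vrh->vring.avail->ring[] and vrh->vring.num.
 */
static int get_head(const uint16_t *avail_ring, uint16_t num,
                    uint16_t last_avail_idx)
{
        uint16_t i = last_avail_idx & (num - 1);    /* slot inside the ring */
        uint16_t head = avail_ring[i];

        if (head >= num) {
                fprintf(stderr, "Guest says index %u > maximum %u\n", head, num);
                return -1;                          /* invalid descriptor index */
        }

        return head;
}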
/kernel/linux/linux-6.6/drivers/tty/serial/jsm/ |
jsm_neo.c
  282  u16 head;  in neo_copy_data_from_uart_to_queue() local
  285  /* cache head and tail of queue */  in neo_copy_data_from_uart_to_queue()
  286  head = ch->ch_r_head & RQUEUEMASK;  in neo_copy_data_from_uart_to_queue()
  294  qleft = tail - head - 1;  in neo_copy_data_from_uart_to_queue()
  345  n = min(((u32) total), (RQUEUESIZE - (u32) head));  in neo_copy_data_from_uart_to_queue()
  366  memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);  in neo_copy_data_from_uart_to_queue()
  372  memset(ch->ch_equeue + head, 0, n);  in neo_copy_data_from_uart_to_queue()
  374  /* Add to and flip head if needed */  in neo_copy_data_from_uart_to_queue()
  375  head = (head  in neo_copy_data_from_uart_to_queue()
  477  u16 head;  neo_copy_data_from_queue_to_uart() local
  [all...]
/kernel/linux/linux-6.6/drivers/vhost/ |
vringh.c
  43  u16 avail_idx, i, head;  in __vringh_get_head() local
  61  err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);  in __vringh_get_head()
  63  vringh_bad("Failed to read head: idx %d address %p",  in __vringh_get_head()
  68  if (head >= vrh->vring.num) {  in __vringh_get_head()
  70  head, vrh->vring.num);  in __vringh_get_head()
  75  return head;  in __vringh_get_head()
  689  * @head: head index we received, for passing to vringh_complete_user().
  695  * *head will be vrh->vring.num. You may be able to ignore an invalid
  708  u16 *head)  in vringh_getdesc_user()
  703  vringh_getdesc_user(struct vringh *vrh, struct vringh_iov *riov, struct vringh_iov *wiov, bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r), u16 *head)  vringh_getdesc_user() argument
  808  vringh_complete_user(struct vringh *vrh, u16 head, u32 len)  vringh_complete_user() argument
  976  vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp)  vringh_getdesc_kern() argument
  1056  vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)  vringh_complete_kern() argument
  1480  vringh_getdesc_iotlb(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp)  vringh_getdesc_iotlb() argument
  1566  vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)  vringh_complete_iotlb() argument
  [all...]
/kernel/linux/linux-6.6/drivers/nvme/host/ |
ioctl.c
  315  c.rw.nsid = cpu_to_le32(ns->head->ns_id);  in nvme_submit_io()
  331  if (ns && nsid != ns->head->ns_id) {  in nvme_validate_passthru_nsid()
  335  current->comm, nsid, ns->head->ns_id);  in nvme_validate_passthru_nsid()
  690  return ns->head->ns_id;  in nvme_ns_ioctl()
  803  void __user *argp, struct nvme_ns_head *head, int srcu_idx,
  805  __releases(&head->srcu)
  811  srcu_read_unlock(&head->srcu, srcu_idx);
  821  struct nvme_ns_head *head = bdev->bd_disk->private_data;  in nvme_ns_head_ioctl() local
  831  srcu_idx = srcu_read_lock(&head->srcu);  in nvme_ns_head_ioctl()
  832  ns = nvme_find_path(head);  in nvme_ns_head_ioctl()
  856  struct nvme_ns_head *head =  nvme_ns_head_chr_ioctl() local
  881  struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);  nvme_ns_head_chr_uring_cmd() local
  [all...]
/kernel/linux/linux-5.10/drivers/slimbus/ |
qcom-ctrl.c
  96  int head;  member
  137  if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {  in slim_alloc_rxbuf()
  156  idx = ctrl->tx.head;  in slim_ack_txn()
  157  ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;  in slim_ack_txn()
  310  if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {  in slim_alloc_txbuf()
  335  u32 *head;  in qcom_xfer_msg() local
  353  head = (u32 *)pbuf;  in qcom_xfer_msg()
  356  *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,  in qcom_xfer_msg()
  360  *head  in qcom_xfer_msg()
  [all...]
/kernel/linux/linux-5.10/net/rds/ |
ib_recv.c
  103  struct rds_ib_cache_head *head;  in rds_ib_recv_alloc_cache() local
  111  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_recv_alloc_cache()
  112  head->first = NULL;  in rds_ib_recv_alloc_cache()
  113  head->count = 0;  in rds_ib_recv_alloc_cache()
  138  struct rds_ib_cache_head *head;  in rds_ib_cache_splice_all_lists() local
  142  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_cache_splice_all_lists()
  143  if (head->first) {  in rds_ib_cache_splice_all_lists()
  144  list_splice_entire_tail(head->first, caller_list);  in rds_ib_cache_splice_all_lists()
  145  head->first = NULL;  in rds_ib_cache_splice_all_lists()
  520  struct list_head *head  in rds_ib_recv_cache_get() local
  [all...]
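rds_ib_cache_splice_all_lists() walks every CPU's cache head with per_cpu_ptr() and splices its entries onto one caller list. A generic per-CPU splice sketch; the struct and the list_splice_tail_init() call are stand-ins, since ib_recv.c keeps a bare pointer to the first entry and uses its own list_splice_entire_tail() helper.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/list.h>

struct cache_head {
        struct list_head first;         /* per-CPU list of cached entries */
        int count;
};

/* Splice every CPU's cached entries onto one caller-supplied list. */
static void splice_all(struct cache_head __percpu *percpu,
                       struct list_head *caller_list)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cache_head *head = per_cpu_ptr(percpu, cpu);

                list_splice_tail_init(&head->first, caller_list);
                head->count = 0;
        }
}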