/kernel/linux/linux-6.6/net/ipv4/
H A D | inet_connection_sock.c |
  896   struct request_sock *nreq;  in inet_reqsk_clone() local
  898   nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);  in inet_reqsk_clone()
  899   if (!nreq) {  in inet_reqsk_clone()
  908   nreq_sk = req_to_sk(nreq);  in inet_reqsk_clone()
  922   nreq->rsk_listener = sk;  in inet_reqsk_clone()
  927   if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)  in inet_reqsk_clone()
  928   rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);  in inet_reqsk_clone()
  930   return nreq;  in inet_reqsk_clone()
  993   struct request_sock *nreq  in reqsk_timer_handler() local
  1335  struct request_sock *nreq;  inet_csk_complete_hashdance() local
  1390  struct request_sock *nreq;  inet_csk_listen_stop() local
[all...]
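The inet_reqsk_clone() hits above show a common kernel shape: clone an object from a slab cache with an allocation that is allowed to fail quietly (GFP_ATOMIC | __GFP_NOWARN), let the caller fall back to the original object on failure, and publish the copy only once it is fully set up. A minimal user-space sketch of that clone-or-fall-back shape; fake_request and clone_request are hypothetical names, not the kernel's:

/*
 * User-space analogue of the clone-or-keep-original pattern in
 * inet_reqsk_clone(): allocation may fail (kernel: GFP_ATOMIC | __GFP_NOWARN)
 * and the caller must be prepared to keep using the original object.
 * All names here (fake_request, clone_request) are hypothetical.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct fake_request {
    int id;
    void *listener;   /* stands in for rsk_listener */
};

static struct fake_request *clone_request(const struct fake_request *req,
                                          void *new_listener)
{
    struct fake_request *nreq = malloc(sizeof(*nreq));

    if (!nreq)
        return NULL;                /* caller keeps using req */

    memcpy(nreq, req, sizeof(*nreq));
    nreq->listener = new_listener;  /* re-point the copy at its new owner */
    return nreq;                    /* publish only when fully initialised */
}

int main(void)
{
    struct fake_request req = { .id = 1, .listener = NULL };
    struct fake_request *nreq = clone_request(&req, (void *)&req);

    printf("using id=%d\n", nreq ? nreq->id : req.id);
    free(nreq);
    return 0;
}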
/kernel/linux/linux-5.10/crypto/ |
H A D | echainiv.c |
  45  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in echainiv_encrypt()
  47  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in echainiv_encrypt()
  48  skcipher_request_set_callback(nreq, req->base.flags,  in echainiv_encrypt()
  50  skcipher_request_set_crypt(nreq, req->src, req->dst,  in echainiv_encrypt()
  54  err = crypto_skcipher_encrypt(nreq);  in echainiv_encrypt()
|
H A D | seqiv.c |
  69  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in seqiv_aead_encrypt()
  71  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in seqiv_aead_encrypt()
  72  skcipher_request_set_callback(nreq, req->base.flags,  in seqiv_aead_encrypt()
  74  skcipher_request_set_crypt(nreq, req->src, req->dst,  in seqiv_aead_encrypt()
  78  err = crypto_skcipher_encrypt(nreq);  in seqiv_aead_encrypt()
|
/kernel/linux/linux-6.6/crypto/ |
H A D | echainiv.c |
  45  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in echainiv_encrypt()
  47  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in echainiv_encrypt()
  48  skcipher_request_set_callback(nreq, req->base.flags,  in echainiv_encrypt()
  50  skcipher_request_set_crypt(nreq, req->src, req->dst,  in echainiv_encrypt()
  54  err = crypto_skcipher_encrypt(nreq);  in echainiv_encrypt()
|
H A D | seqiv.c |
  68  SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);  in seqiv_aead_encrypt()
  70  skcipher_request_set_sync_tfm(nreq, ctx->sknull);  in seqiv_aead_encrypt()
  71  skcipher_request_set_callback(nreq, req->base.flags,  in seqiv_aead_encrypt()
  73  skcipher_request_set_crypt(nreq, req->src, req->dst,  in seqiv_aead_encrypt()
  77  err = crypto_skcipher_encrypt(nreq);  in seqiv_aead_encrypt()
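The echainiv.c and seqiv.c hits all follow one pattern: when the caller's destination buffer differs from the source, the request's data is first copied from req->src to req->dst by "encrypting" it with the null skcipher, which is just a copy. Below is a hedged, kernel-context sketch of that helper; it is not standalone code, the name my_copy_ad() is hypothetical, and sknull is assumed to come from crypto_get_default_null_skcipher() as it does in these drivers. Only calls visible in the snippets above are used.

/*
 * Kernel-context sketch (not a standalone program): copy @len bytes from the
 * @src scatterlist to @dst by running them through the null skcipher, as in
 * the echainiv/seqiv hits above.  my_copy_ad() is a hypothetical helper name.
 */
#include <crypto/skcipher.h>

static int my_copy_ad(struct crypto_sync_skcipher *sknull,
                      struct scatterlist *src, struct scatterlist *dst,
                      unsigned int len, u32 flags)
{
    SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

    skcipher_request_set_sync_tfm(nreq, sknull);
    skcipher_request_set_callback(nreq, flags, NULL, NULL);
    skcipher_request_set_crypt(nreq, src, dst, len, NULL);

    /* cipher_null writes the plaintext to the destination unchanged */
    return crypto_skcipher_encrypt(nreq);
}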
|
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/ |
H A D | wr.c |
  29    int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  in mlx5r_wq_overflow() argument
  35    if (likely(cur + nreq < wq->max_post))  in mlx5r_wq_overflow()
  43    return cur + nreq >= wq->max_post;  in mlx5r_wq_overflow()
  726   int *size, void **cur_edge, int nreq, __be32 general_id,  in mlx5r_begin_wqe()
  729   if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))  in mlx5r_begin_wqe()
  751   void **cur_edge, int nreq)  in begin_wqe()
  753   return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,  in begin_wqe()
  760   u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)  in mlx5r_finish_wqe()
  773   qp->sq.wqe_head[idx] = qp->sq.head + nreq;  in mlx5r_finish_wqe()
  815   void **cur_edge, unsigned int *idx, int nreq,  in handle_psv()
  724   mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx, int *size, void **cur_edge, int nreq, __be32 general_id, bool send_signaled, bool solicited)  mlx5r_begin_wqe() argument
  748   begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned int *idx, int *size, void **cur_edge, int nreq)  begin_wqe() argument
  758   mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, void *seg, u8 size, void *cur_edge, unsigned int idx, u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)  mlx5r_finish_wqe() argument
  812   handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, struct ib_sig_domain *domain, u32 psv_index, u8 next_fence)  handle_psv() argument
  843   handle_reg_mr_integrity(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence)  handle_reg_mr_integrity() argument
  931   handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence, int *num_sge)  handle_qpt_rc() argument
  1025  mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq, struct mlx5_wqe_ctrl_seg *ctrl)  mlx5r_ring_db() argument
  1066  int nreq;  mlx5_ib_post_send() local
  1216  int nreq;  mlx5_ib_post_recv() local
[all...]
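mlx5r_wq_overflow() (line 29 above) and the mthca equivalent later in this listing use the same wrap-safe occupancy check: head and tail are free-running unsigned counters, so head - tail is the number of outstanding WQEs even across integer wraparound, and posting nreq more entries is refused once cur + nreq reaches max_post (the kernel versions also re-read the tail under the completion-queue lock before declaring overflow). A minimal standalone model of the arithmetic, with hypothetical names:

/*
 * User-space model of the ring-overflow check used by mlx5r_wq_overflow()
 * and mthca_wq_overflow(): head and tail are free-running unsigned counters,
 * so (head - tail) is the posted-but-not-completed count even after
 * wraparound.  struct fake_wq and wq_overflow() are hypothetical names.
 */
#include <stdio.h>

struct fake_wq {
    unsigned int head;      /* incremented by the poster */
    unsigned int tail;      /* incremented at completion */
    unsigned int max_post;  /* ring capacity */
};

static int wq_overflow(const struct fake_wq *wq, int nreq)
{
    unsigned int cur = wq->head - wq->tail;   /* wrap-safe occupancy */

    return cur + nreq >= wq->max_post;
}

int main(void)
{
    /* counters near UINT_MAX still give the right occupancy (here: 8) */
    struct fake_wq wq = { .head = 4294967290u + 8, .tail = 4294967290u,
                          .max_post = 16 };

    printf("room for 7 more: %s\n", wq_overflow(&wq, 7) ? "no" : "yes");
    printf("room for 8 more: %s\n", wq_overflow(&wq, 8) ? "no" : "yes");
    return 0;
}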
H A D | wr.h |
  94   int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
  97   int *size, void **cur_edge, int nreq, __be32 general_id,
  101  u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
  102  void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
|
H A D | srq.c |
  412  int nreq;  in mlx5_ib_post_srq_recv() local
  423  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_srq_recv()
  455  if (likely(nreq)) {  in mlx5_ib_post_srq_recv()
  456  srq->wqe_ctr += nreq;  in mlx5_ib_post_srq_recv()
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
H A D | wr.c |
  78    static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq,  in mlx5_wq_overflow() argument
  85    if (likely(cur + nreq < wq->max_post))  in mlx5_wq_overflow()
  93    return cur + nreq >= wq->max_post;  in mlx5_wq_overflow()
  945   int *size, void **cur_edge, int nreq,  in __begin_wqe()
  948   if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))  in __begin_wqe()
  970   void **cur_edge, int nreq)  in begin_wqe()
  972   return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,  in begin_wqe()
  980   unsigned int idx, u64 wr_id, int nreq, u8 fence,  in finish_wqe()
  994   qp->sq.wqe_head[idx] = qp->sq.head + nreq;  in finish_wqe()
  1036  void **cur_edge, unsigned int *idx, int nreq,  in handle_psv()
  942   __begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned int *idx, int *size, void **cur_edge, int nreq, bool send_signaled, bool solicited)  __begin_wqe() argument
  967   begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, const struct ib_send_wr *wr, unsigned int *idx, int *size, void **cur_edge, int nreq)  begin_wqe() argument
  977   finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, void *seg, u8 size, void *cur_edge, unsigned int idx, u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)  finish_wqe() argument
  1033  handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, struct ib_sig_domain *domain, u32 psv_index, u8 next_fence)  handle_psv() argument
  1064  handle_reg_mr_integrity(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence)  handle_reg_mr_integrity() argument
  1152  handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size, void **cur_edge, unsigned int *idx, int nreq, u8 fence, u8 next_fence, int *num_sge)  handle_qpt_rc() argument
  1291  int nreq;  mlx5_ib_post_send() local
  1469  int nreq;  mlx5_ib_post_recv() local
[all...]
H A D | srq.c |
  433  int nreq;  in mlx5_ib_post_srq_recv() local
  444  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_srq_recv()
  476  if (likely(nreq)) {  in mlx5_ib_post_srq_recv()
  477  srq->wqe_ctr += nreq;  in mlx5_ib_post_srq_recv()
|
/kernel/linux/linux-5.10/arch/ia64/include/asm/ |
H A D | perfmon.h |
  87  extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
  88  extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
  89  extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
  90  extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/ |
H A D | mthca_srq.c |
  493  int nreq;  in mthca_tavor_post_srq_recv() local
  502  for (nreq = 0; wr; wr = wr->next) {  in mthca_tavor_post_srq_recv()
  543  ++nreq;  in mthca_tavor_post_srq_recv()
  544  if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {  in mthca_tavor_post_srq_recv()
  545  nreq = 0;  in mthca_tavor_post_srq_recv()
  561  if (likely(nreq)) {  in mthca_tavor_post_srq_recv()
  568  mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,  in mthca_tavor_post_srq_recv()
  586  int nreq;  in mthca_arbel_post_srq_recv() local
  592  for (nreq = 0; wr; ++nreq, w  in mthca_arbel_post_srq_recv()
[all...]
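mthca_tavor_post_srq_recv() batches its doorbells: it counts posted receives in nreq, rings the doorbell and resets the counter every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests, and rings once more after the loop for any remainder. A standalone sketch of that batching, where ring_doorbell() and struct fake_wr are hypothetical stand-ins for mthca_write64() and struct ib_recv_wr:

/*
 * User-space model of the batching in mthca_tavor_post_srq_recv(): ring the
 * doorbell every MAX_WQES_PER_DB posted requests, then once at the end for
 * the remainder.  All names and the batch size here are illustrative.
 */
#include <stdio.h>

#define MAX_WQES_PER_DB 256   /* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

struct fake_wr { struct fake_wr *next; };

static void ring_doorbell(int nreq)
{
    printf("doorbell: %d new WQEs\n", nreq);
}

static void post_recv_list(struct fake_wr *wr)
{
    int nreq;

    for (nreq = 0; wr; wr = wr->next) {
        /* ... build the WQE for this request here ... */
        ++nreq;
        if (nreq == MAX_WQES_PER_DB) {
            ring_doorbell(nreq);   /* flush a full batch */
            nreq = 0;
        }
    }
    if (nreq)
        ring_doorbell(nreq);       /* flush the remainder */
}

int main(void)
{
    struct fake_wr wrs[600];

    for (int i = 0; i < 599; i++)
        wrs[i].next = &wrs[i + 1];
    wrs[599].next = NULL;

    post_recv_list(&wrs[0]);       /* expect batches of 256, 256, 88 */
    return 0;
}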
H A D | mthca_qp.c |
  1565  static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,  in mthca_wq_overflow() argument
  1572  if (likely(cur + nreq < wq->max))  in mthca_wq_overflow()
  1580  return cur + nreq >= wq->max;  in mthca_wq_overflow()
  1631  int nreq;  in mthca_tavor_post_send() local
  1635  * f0 and size0 are only used if nreq != 0, and they will  in mthca_tavor_post_send()
  1637  * before nreq is incremented. So nreq cannot become non-zero  in mthca_tavor_post_send()
  1652  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_tavor_post_send()
  1653  if (mthca_wq_overflow(&qp->sq, nreq, q  in mthca_tavor_post_send()
  1827  int nreq;  mthca_tavor_post_receive() local
  1935  int nreq;  mthca_arbel_post_send() local
  2166  int nreq;  mthca_arbel_post_receive() local
[all...]
/kernel/linux/linux-6.6/drivers/infiniband/hw/mthca/ |
H A D | mthca_srq.c |
  493  int nreq;  in mthca_tavor_post_srq_recv() local
  502  for (nreq = 0; wr; wr = wr->next) {  in mthca_tavor_post_srq_recv()
  543  ++nreq;  in mthca_tavor_post_srq_recv()
  544  if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {  in mthca_tavor_post_srq_recv()
  545  nreq = 0;  in mthca_tavor_post_srq_recv()
  561  if (likely(nreq)) {  in mthca_tavor_post_srq_recv()
  568  mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,  in mthca_tavor_post_srq_recv()
  586  int nreq;  in mthca_arbel_post_srq_recv() local
  592  for (nreq = 0; wr; ++nreq, w  in mthca_arbel_post_srq_recv()
[all...]
H A D | mthca_qp.c |
  1568  static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,  in mthca_wq_overflow() argument
  1575  if (likely(cur + nreq < wq->max))  in mthca_wq_overflow()
  1583  return cur + nreq >= wq->max;  in mthca_wq_overflow()
  1634  int nreq;  in mthca_tavor_post_send() local
  1638  * f0 and size0 are only used if nreq != 0, and they will  in mthca_tavor_post_send()
  1640  * before nreq is incremented. So nreq cannot become non-zero  in mthca_tavor_post_send()
  1655  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_tavor_post_send()
  1656  if (mthca_wq_overflow(&qp->sq, nreq, q  in mthca_tavor_post_send()
  1830  int nreq;  mthca_tavor_post_receive() local
  1938  int nreq;  mthca_arbel_post_send() local
  2169  int nreq;  mthca_arbel_post_receive() local
[all...]
/kernel/linux/linux-6.6/drivers/usb/gadget/function/ |
H A D | uvc_queue.c |
  48  unsigned int nreq;  in uvc_queue_setup() local
  64  nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size);  in uvc_queue_setup()
  65  nreq = clamp(nreq, 4U, 64U);  in uvc_queue_setup()
  66  video->uvc_num_requests = nreq;  in uvc_queue_setup()
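uvc_queue_setup() sizes the USB request pool so roughly half of one video buffer can be in flight: divide the buffer size by two, divide by the per-request payload size rounding up, then clamp the result to between 4 and 64 requests. The same arithmetic as a standalone example; the macros are redefined locally (they mirror the kernel's behaviour but are not the kernel headers) so the example compiles on its own:

/*
 * Standalone model of the request-count calculation in uvc_queue_setup():
 * enough requests to cover half a frame, rounded up, clamped to [4, 64].
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define CLAMP(v, lo, hi)    ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static unsigned int uvc_num_requests(unsigned int buf_size,
                                     unsigned int req_size)
{
    unsigned int nreq = DIV_ROUND_UP(DIV_ROUND_UP(buf_size, 2), req_size);

    return CLAMP(nreq, 4u, 64u);
}

int main(void)
{
    /* 614400-byte frame, 3072-byte requests -> 100, clamped to 64 */
    printf("%u\n", uvc_num_requests(614400, 3072));
    /* a tiny buffer still gets the minimum of 4 requests */
    printf("%u\n", uvc_num_requests(1024, 3072));
    return 0;
}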
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
H A D | srq.c |
  311  int nreq;  in mlx4_ib_post_srq_recv() local
  319  nreq = 0;  in mlx4_ib_post_srq_recv()
  323  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_srq_recv()
  355  if (likely(nreq)) {  in mlx4_ib_post_srq_recv()
  356  srq->wqe_ctr += nreq;  in mlx4_ib_post_srq_recv()
|
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/ |
H A D | srq.c |
  315  int nreq;  in mlx4_ib_post_srq_recv() local
  326  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_srq_recv()
  358  if (likely(nreq)) {  in mlx4_ib_post_srq_recv()
  359  srq->wqe_ctr += nreq;  in mlx4_ib_post_srq_recv()
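The mlx4 and mlx5 post_srq_recv() hits share one bookkeeping shape: walk the chain of work requests, count successfully built WQEs in nreq, report the failing request through bad_wr, and advance the producer counter once at the end, only if something was actually posted. A standalone model of that shape; build_wqe() and the fake_* types are hypothetical stand-ins for the real descriptor writes and verbs structures:

/*
 * User-space model of the mlx4/mlx5 post_srq_recv() bookkeeping: count built
 * WQEs, stop at the first failure and report it via bad_wr, and bump the
 * producer counter once at the end if anything was posted.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

struct fake_wr  { int num_sge; struct fake_wr *next; };
struct fake_srq { uint16_t wqe_ctr; int max_gs; };

static int build_wqe(struct fake_srq *srq, const struct fake_wr *wr)
{
    if (wr->num_sge > srq->max_gs)
        return -EINVAL;            /* request too large for this SRQ */
    /* ... write the scatter entries into the ring here ... */
    return 0;
}

static int post_srq_recv(struct fake_srq *srq, struct fake_wr *wr,
                         struct fake_wr **bad_wr)
{
    int err = 0, nreq;

    for (nreq = 0; wr; ++nreq, wr = wr->next) {
        err = build_wqe(srq, wr);
        if (err) {
            *bad_wr = wr;          /* report where posting stopped */
            break;
        }
    }
    if (nreq)
        srq->wqe_ctr += nreq;      /* publish the WQEs that were built */
    return err;
}

int main(void)
{
    struct fake_wr wrs[3] = { { 1, &wrs[1] }, { 1, &wrs[2] }, { 9, NULL } };
    struct fake_srq srq = { .wqe_ctr = 0, .max_gs = 4 };
    struct fake_wr *bad = NULL;

    int err = post_srq_recv(&srq, &wrs[0], &bad);
    printf("err=%d posted=%u bad=%p\n", err, (unsigned)srq.wqe_ctr, (void *)bad);
    return 0;
}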
|
/kernel/linux/linux-5.10/fs/nfs/ |
H A D | pnfs_nfs.c |
  459  unsigned int nreq = 0;  in pnfs_bucket_alloc_ds_commits() local
  471  nreq++;  in pnfs_bucket_alloc_ds_commits()
  475  return nreq;  in pnfs_bucket_alloc_ds_commits()
  480  return nreq;  in pnfs_bucket_alloc_ds_commits()
  515  unsigned int nreq = 0;  in pnfs_generic_commit_pagelist() local
  526  nreq++;  in pnfs_generic_commit_pagelist()
  529  nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);  in pnfs_generic_commit_pagelist()
  530  if (nreq == 0)  in pnfs_generic_commit_pagelist()
|
/kernel/linux/linux-6.6/fs/nfs/ |
H A D | pnfs_nfs.c |
  459  unsigned int nreq = 0;  in pnfs_bucket_alloc_ds_commits() local
  471  nreq++;  in pnfs_bucket_alloc_ds_commits()
  475  return nreq;  in pnfs_bucket_alloc_ds_commits()
  480  return nreq;  in pnfs_bucket_alloc_ds_commits()
  515  unsigned int nreq = 0;  in pnfs_generic_commit_pagelist() local
  526  nreq++;  in pnfs_generic_commit_pagelist()
  529  nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);  in pnfs_generic_commit_pagelist()
  530  if (nreq == 0)  in pnfs_generic_commit_pagelist()
|
/kernel/linux/linux-5.10/drivers/crypto/inside-secure/ |
H A D | safexcel.c |
  816   int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;  in safexcel_dequeue() local
  856   nreq++;  in safexcel_dequeue()
  867   if (!nreq)  in safexcel_dequeue()
  872   priv->ring[ring].requests += nreq;  in safexcel_dequeue()
  1019  int ret, i, nreq, ndesc, tot_descs, handled = 0;  in safexcel_handle_result_descriptor() local
  1025  nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);  in safexcel_handle_result_descriptor()
  1026  nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;  in safexcel_handle_result_descriptor()
  1027  nreq &= EIP197_xDR_PROC_xD_PKT_MASK;  in safexcel_handle_result_descriptor()
  1028  if (!nreq)  in safexcel_handle_result_descriptor()
  1031  for (i = 0; i < nreq;  in safexcel_handle_result_descriptor()
[all...]
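safexcel_handle_result_descriptor() learns how many requests completed by reading a hardware count register and isolating the packet-count field with a shift followed by a mask; the loop that follows then retires exactly nreq requests. A standalone model of the shift-and-mask step, using illustrative offset and mask values rather than the real EIP197 register layout:

/*
 * Standalone model of the field extraction in
 * safexcel_handle_result_descriptor(): shift the raw register value down to
 * the packet-count field, then mask off neighbouring bits.  The offset and
 * mask below are illustrative only, not the EIP197's actual layout.
 */
#include <stdio.h>
#include <stdint.h>

#define PROC_PKT_OFFSET 24      /* bit position of the count field */
#define PROC_PKT_MASK   0x7f    /* field width: 7 bits */

static unsigned int proc_pkt_count(uint32_t reg)
{
    unsigned int nreq = reg;

    nreq >>= PROC_PKT_OFFSET;   /* move the field down to bit 0 */
    nreq &= PROC_PKT_MASK;      /* drop anything above the field */
    return nreq;
}

int main(void)
{
    uint32_t reg = (13u << PROC_PKT_OFFSET) | 0x00ab12u; /* 13 packets done */

    printf("completed requests: %u\n", proc_pkt_count(reg));
    return 0;
}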
/kernel/linux/linux-6.6/drivers/crypto/inside-secure/ |
H A D | safexcel.c |
  824   int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;  in safexcel_dequeue() local
  864   nreq++;  in safexcel_dequeue()
  875   if (!nreq)  in safexcel_dequeue()
  880   priv->ring[ring].requests += nreq;  in safexcel_dequeue()
  1016  int ret, i, nreq, ndesc, tot_descs, handled = 0;  in safexcel_handle_result_descriptor() local
  1022  nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);  in safexcel_handle_result_descriptor()
  1023  nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;  in safexcel_handle_result_descriptor()
  1024  nreq &= EIP197_xDR_PROC_xD_PKT_MASK;  in safexcel_handle_result_descriptor()
  1025  if (!nreq)  in safexcel_handle_result_descriptor()
  1028  for (i = 0; i < nreq;  in safexcel_handle_result_descriptor()
[all...]
/kernel/linux/linux-5.10/fs/nilfs2/ |
H A D | btree.c |
  1739  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_prepare_convert_and_insert()
  1766  if (nreq != NULL) {  in nilfs_btree_prepare_convert_and_insert()
  1767  nreq->bpr_ptr = dreq->bpr_ptr + 1;  in nilfs_btree_prepare_convert_and_insert()
  1768  ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1772  ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);  in nilfs_btree_prepare_convert_and_insert()
  1785  nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1799  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_commit_convert_and_insert()
  1817  if (nreq != NULL) {  in nilfs_btree_commit_convert_and_insert()
  1819  nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);  in nilfs_btree_commit_convert_and_insert()
  1835  tmpptr = nreq  in nilfs_btree_commit_convert_and_insert()
  1737  nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats)  nilfs_btree_prepare_convert_and_insert() argument
  1794  nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh)  nilfs_btree_commit_convert_and_insert() argument
  1871  union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;  nilfs_btree_convert_and_insert() local
[all...]
/kernel/linux/linux-6.6/fs/nilfs2/ |
H A D | btree.c |
  1736  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_prepare_convert_and_insert()
  1763  if (nreq != NULL) {  in nilfs_btree_prepare_convert_and_insert()
  1764  nreq->bpr_ptr = dreq->bpr_ptr + 1;  in nilfs_btree_prepare_convert_and_insert()
  1765  ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1769  ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);  in nilfs_btree_prepare_convert_and_insert()
  1782  nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
  1796  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_commit_convert_and_insert()
  1814  if (nreq != NULL) {  in nilfs_btree_commit_convert_and_insert()
  1816  nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);  in nilfs_btree_commit_convert_and_insert()
  1832  tmpptr = nreq  in nilfs_btree_commit_convert_and_insert()
  1734  nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats)  nilfs_btree_prepare_convert_and_insert() argument
  1791  nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh)  nilfs_btree_commit_convert_and_insert() argument
  1868  union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;  nilfs_btree_convert_and_insert() local
[all...]
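Both nilfs2 listings show a two-phase allocation: nilfs_btree_prepare_convert_and_insert() reserves the direct and node pointer requests (dreq first, then nreq at dreq->bpr_ptr + 1), aborts the earlier reservation if a later step fails, and the commit helper makes everything permanent only after every prepare step succeeded. A standalone sketch of that prepare/commit/abort shape, with hypothetical names throughout:

/*
 * Standalone sketch of the prepare/commit/abort pattern visible in
 * nilfs_btree_prepare_convert_and_insert(): reserve everything first, unwind
 * earlier reservations if any later one fails, commit only on full success.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct ptr_req { unsigned long ptr; bool prepared; };

static int prepare_alloc(struct ptr_req *req, unsigned long ptr, bool fail)
{
    if (fail)
        return -ENOSPC;
    req->ptr = ptr;
    req->prepared = true;
    return 0;
}

static void abort_alloc(struct ptr_req *req)  { req->prepared = false; }
static void commit_alloc(struct ptr_req *req) { printf("committed %lu\n", req->ptr); }

static int convert_and_insert(bool second_fails)
{
    struct ptr_req dreq = { 0 }, nreq = { 0 };
    int ret;

    ret = prepare_alloc(&dreq, 100, false);
    if (ret)
        return ret;

    /* the second reservation is derived from the first, as in the btree code */
    ret = prepare_alloc(&nreq, dreq.ptr + 1, second_fails);
    if (ret) {
        abort_alloc(&dreq);         /* unwind the earlier prepare */
        return ret;
    }

    commit_alloc(&dreq);
    commit_alloc(&nreq);
    return 0;
}

int main(void)
{
    printf("success path: %d\n", convert_and_insert(false));
    printf("failure path: %d\n", convert_and_insert(true));
    return 0;
}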
/kernel/linux/linux-5.10/drivers/dma/ |
H A D | bcm-sba-raid.c |
  297  struct sba_request *nreq;  in sba_free_chained_requests() local
  303  list_for_each_entry(nreq, &req->next, next)  in sba_free_chained_requests()
  304  _sba_free_request(sba, nreq);  in sba_free_chained_requests()
  420  struct sba_request *nreq, *first = req->first;  in sba_process_received_request() local
  442  list_for_each_entry(nreq, &first->next, next)  in sba_process_received_request()
  443  _sba_free_request(sba, nreq);  in sba_process_received_request()
  528  struct sba_request *req, *nreq;  in sba_tx_submit() local
  540  list_for_each_entry(nreq, &req->next, next)  in sba_tx_submit()
  541  _sba_pending_request(sba, nreq);  in sba_tx_submit()
|