Cross-reference hits for the identifier 'wr' in the bundled kernel sources (linux-5.10 and linux-6.6). Each entry lists the file followed by its matching source lines; '…' marks lines cut short by the search tool, and '[more matches not shown]' indicates additional hits in the same file.

/kernel/linux/linux-5.10/drivers/media/dvb-frontends/dib3000mb.c
    (all matches in dib3000mb_set_frontend())
    148  wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
    153  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
    157  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
    169  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
    173  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
    177  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
    181  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
    193  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
    200  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
    209  wr(DIB3000MB_REG_QA…
    [more matches not shown]
/kernel/linux/linux-6.6/drivers/media/dvb-frontends/dib3000mb.c
    (all matches in dib3000mb_set_frontend())
    148  wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
    153  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
    157  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
    169  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
    173  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
    177  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
    181  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
    193  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
    200  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
    209  wr(DIB3000MB_REG_QA…
    [more matches not shown]
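All of the dib3000mb matches are register writes inside dib3000mb_set_frontend(), which maps DVB-T tuning parameters (FFT size, guard interval, spectrum inversion) onto wr() register writes. Below is a minimal sketch of that mapping pattern; write_reg() is a hypothetical stand-in for the driver's wr() macro, and the register addresses and enum names are placeholders rather than the real DIB3000MB register map.

#include <stdio.h>

/* Placeholder register addresses and values; the real ones live in the driver headers. */
enum { REG_FFT = 0x01, REG_GUARD_TIME = 0x02, REG_DDS_INV = 0x03 };
enum { TRANSMISSION_MODE_2K, TRANSMISSION_MODE_8K };
enum { GUARD_TIME_1_32, GUARD_TIME_1_16, GUARD_TIME_1_8, GUARD_TIME_1_4 };
enum { DDS_INVERSION_OFF, DDS_INVERSION_ON };

enum fft   { FFT_2K, FFT_8K };
enum guard { G_1_32, G_1_16, G_1_8, G_1_4 };

/* Hypothetical stand-in for the driver's wr() register-write macro. */
static void write_reg(int reg, int val)
{
    printf("wr(0x%02x, %d)\n", reg, val);
}

/* Translate tuning parameters into register writes, one decision per parameter. */
static void set_frontend(enum fft fft, enum guard guard, int inversion)
{
    write_reg(REG_FFT, fft == FFT_8K ? TRANSMISSION_MODE_8K : TRANSMISSION_MODE_2K);

    switch (guard) {
    case G_1_32: write_reg(REG_GUARD_TIME, GUARD_TIME_1_32); break;
    case G_1_16: write_reg(REG_GUARD_TIME, GUARD_TIME_1_16); break;
    case G_1_8:  write_reg(REG_GUARD_TIME, GUARD_TIME_1_8);  break;
    case G_1_4:  write_reg(REG_GUARD_TIME, GUARD_TIME_1_4);  break;
    }

    write_reg(REG_DDS_INV, inversion ? DDS_INVERSION_ON : DDS_INVERSION_OFF);
}

int main(void)
{
    set_frontend(FFT_8K, G_1_4, 0);
    return 0;
}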
/kernel/linux/linux-5.10/lib/decompress_unlzma.c
    in get_pos():
        292  static inline size_t INIT get_pos(struct writer *wr)
        295  wr->global_pos + wr->buffer_pos;
    in peek_old_byte():
        298  static inline uint8_t INIT peek_old_byte(struct writer *wr,
        301  if (!wr->flush) {
        303  while (offs > wr->header->dict_size)
        304  offs -= wr->header->dict_size;
        305  pos = wr->buffer_pos - offs;
        306  return wr->buffer[pos];
        308  uint32_t pos = wr…
    'wr' is also an argument of write_byte() (316), copy_byte() (330), copy_bytes() (335),
    process_bit0() (347) and process_bit1() (391), and a local of unlzma() (553).
    [more matches not shown]
/kernel/linux/linux-6.6/lib/decompress_unlzma.c
    in get_pos():
        292  static inline size_t INIT get_pos(struct writer *wr)
        295  wr->global_pos + wr->buffer_pos;
    in peek_old_byte():
        298  static inline uint8_t INIT peek_old_byte(struct writer *wr,
        301  if (!wr->flush) {
        303  while (offs > wr->header->dict_size)
        304  offs -= wr->header->dict_size;
        305  pos = wr->buffer_pos - offs;
        306  return wr->buffer[pos];
        308  uint32_t pos = wr…
    'wr' is also an argument of write_byte() (316), copy_byte() (330), copy_bytes() (335),
    process_bit0() (347) and process_bit1() (391), and a local of unlzma() (553).
    [more matches not shown]
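The decompress_unlzma.c hits are the LZMA writer's sliding dictionary: peek_old_byte() reads a byte written offs positions back (wrapping around the dictionary size), and write_byte()/copy_bytes() append output, which is how LZMA matches are replayed. Below is a simplified, self-contained sketch of that ring-buffer idea; the struct layout and modulo arithmetic are standalone assumptions, and the kernel version additionally streams the buffer out through wr->flush and tracks global_pos.

#include <stdint.h>
#include <stddef.h>

struct writer {
    uint8_t *buffer;     /* dictionary / output window */
    size_t buffer_pos;   /* next position to write */
    size_t dict_size;    /* capacity of the window */
};

/* Byte written 'offs' positions before the current one, wrapping around.
 * Assumes offs <= dict_size, which a well-formed LZMA stream guarantees. */
static uint8_t peek_old_byte(const struct writer *wr, size_t offs)
{
    return wr->buffer[(wr->buffer_pos + wr->dict_size - offs) % wr->dict_size];
}

/* Append one byte to the window, wrapping when it fills up. */
static void write_byte(struct writer *wr, uint8_t byte)
{
    wr->buffer[wr->buffer_pos] = byte;
    wr->buffer_pos = (wr->buffer_pos + 1) % wr->dict_size;
}

/* Replay an LZMA match: re-emit 'len' bytes starting 'dist' positions back. */
static void copy_bytes(struct writer *wr, size_t dist, int len)
{
    while (len-- > 0)
        write_byte(wr, peek_old_byte(wr, dist));
}

The key property is that match distances always refer back into the same window the decoder is writing into, so no separate history buffer is needed.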
/kernel/linux/linux-5.10/include/trace/events/ib_mad.h
    24   TP_PROTO(struct ib_mad_send_wr_private *wr,
    26   TP_ARGS(wr, qp_info),
    56   __entry->dev_index = wr->mad_agent_priv->agent.device->index;
    57   __entry->port_num = wr->mad_agent_priv->agent.port_num;
    58   __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
    59   __entry->agent_priv = wr->mad_agent_priv;
    60   __entry->wrtid = wr->tid;
    61   __entry->max_retries = wr->max_retries;
    62   __entry->retries_left = wr->retries_left;
    63   __entry->retry = wr…
    [more matches not shown]
/kernel/linux/linux-6.6/include/trace/events/ib_mad.h
    24   TP_PROTO(struct ib_mad_send_wr_private *wr,
    26   TP_ARGS(wr, qp_info),
    55   __entry->dev_index = wr->mad_agent_priv->agent.device->index;
    56   __entry->port_num = wr->mad_agent_priv->agent.port_num;
    57   __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
    58   __entry->agent_priv = wr->mad_agent_priv;
    59   __entry->wrtid = wr->tid;
    60   __entry->max_retries = wr->max_retries;
    61   __entry->retries_left = wr->retries_left;
    62   __entry->retry = wr…
    [more matches not shown]
/kernel/linux/linux-5.10/tools/bpf/bpftool/json_writer.c
    in main():
        311  json_writer_t *wr = jsonw_new(stdout);
        313  jsonw_start_object(wr);
        314  jsonw_pretty(wr, true);
        315  jsonw_name(wr, "Vyatta");
        316  jsonw_start_object(wr);
        317  jsonw_string_field(wr, "url", "http://vyatta.com");
        318  jsonw_uint_field(wr, "downloads", 2000000ul);
        319  jsonw_float_field(wr, "stock", 8.16);
        321  jsonw_name(wr, "ARGV");
        322  jsonw_start_array(wr);
    [more matches not shown]
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/json_writer.c
    in main():
        311  json_writer_t *wr = jsonw_new(stdout);
        313  jsonw_start_object(wr);
        314  jsonw_pretty(wr, true);
        315  jsonw_name(wr, "Vyatta");
        316  jsonw_start_object(wr);
        317  jsonw_string_field(wr, "url", "http://vyatta.com");
        318  jsonw_uint_field(wr, "downloads", 2000000ul);
        319  jsonw_float_field(wr, "stock", 8.16);
        321  jsonw_name(wr, "ARGV");
        322  jsonw_start_array(wr);
    [more matches not shown]
/kernel/linux/linux-6.6/tools/bpf/bpftool/json_writer.c
    in main():
        311  json_writer_t *wr = jsonw_new(stdout);
        313  jsonw_start_object(wr);
        314  jsonw_pretty(wr, true);
        315  jsonw_name(wr, "Vyatta");
        316  jsonw_start_object(wr);
        317  jsonw_string_field(wr, "url", "http://vyatta.com");
        318  jsonw_uint_field(wr, "downloads", 2000000ul);
        319  jsonw_float_field(wr, "stock", 8.16);
        321  jsonw_name(wr, "ARGV");
        322  jsonw_start_array(wr);
    [more matches not shown]
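All three json_writer.c copies end in the same self-test main(), which effectively documents the jsonw_* API. A minimal standalone usage sketch follows, assuming it is built together with the json_writer.c/json_writer.h pair from one of the paths above; the field names and values here are illustrative.

#include <stdio.h>
#include "json_writer.h"   /* from tools/bpf/bpftool/ or tools/testing/selftests/bpf/ */

int main(void)
{
    json_writer_t *wr = jsonw_new(stdout);   /* emit JSON to stdout */

    if (!wr)
        return 1;

    jsonw_pretty(wr, true);                  /* indent the output */
    jsonw_start_object(wr);                  /* { */
    jsonw_string_field(wr, "url", "http://vyatta.com");
    jsonw_uint_field(wr, "downloads", 2000000ul);
    jsonw_name(wr, "versions");
    jsonw_start_array(wr);                   /* [ */
    jsonw_string(wr, "5.10");
    jsonw_string(wr, "6.6");
    jsonw_end_array(wr);                     /* ] */
    jsonw_end_object(wr);                    /* } */
    jsonw_destroy(&wr);                      /* flush and free */
    return 0;
}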
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/wr.c
    9    #include "wr.h"
    in set_eth_seg():
        104  static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
        111  if (wr->send_flags & IB_SEND_IP_CSUM)
        115  if (wr->opcode == IB_WR_LSO) {
        116  struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
    in set_datagram_seg():
        153  const struct ib_send_wr *wr)
        155  memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
        157  cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
        158  dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)…
    'wr' is also an argument of set_datagram_seg() (152), set_reg_umr_segment() (319),
    set_reg_mkey_segment() (401), send_ieth() (449), set_data_inl_seg() (481),
    set_reg_wr() (856), __begin_wqe() (942), begin_wqe() (967), handle_rdma_op() (1008),
    handle_local_inv() (1015), handle_reg_mr() (1024), handle_psv() (1033),
    handle_reg_mr_integrity() (1064), handle_qpt_rc() (1152), handle_qpt_uc() (1203),
    handle_qpt_hw_gsi() (1215), handle_qpt_ud() (1225), handle_qpt_reg_umr() (1246),
    mlx5_ib_post_send() (1275) and mlx5_ib_post_recv() (1459),
    and a local of set_pi_umr_wr() (782).
    [more matches not shown]

/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/wr.h
    44   int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
    46   int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
    in mlx5_ib_post_send_nodrain():
        50   const struct ib_send_wr *wr,
        53   return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
    in mlx5_ib_post_send_drain():
        57   const struct ib_send_wr *wr,
        60   return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
    in mlx5_ib_post_recv_nodrain():
        64   const struct ib_recv_wr *wr,
        67   return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
    in mlx5_ib_post_recv_drain():
        71   const struct ib_recv_wr *wr,
        74   return mlx5_ib_post_recv(ibqp, wr, bad_w…
    'wr' is an argument of mlx5_ib_post_send_nodrain() (49), mlx5_ib_post_send_drain() (56),
    mlx5_ib_post_recv_nodrain() (63) and mlx5_ib_post_recv_drain() (70).
    [more matches not shown]

/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/gsi.c
    in generate_completions():
        51   struct mlx5_ib_gsi_wr *wr;
        56   wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
        58   if (!wr->completed)
        61   WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
        62   wr->completed = false;
    in handle_single_completion():
        71   struct mlx5_ib_gsi_wr *wr =
        78   wr->completed = true;
        79   wr_id = wr->wc.wr_id;
        80   wr->wc = *wc;
        81   wr…
    'wr' is also an argument of mlx5_ib_add_outstanding_wr() (388), mlx5_ib_gsi_silent_drop() (420),
    get_tx_qp() (440), mlx5_ib_gsi_post_send() (458) and mlx5_ib_gsi_post_recv() (503).
    [more matches not shown]
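The gsi.c matches show how the mlx5 GSI layer keeps completions ordered: each posted send gets a slot in a fixed-size outstanding_wrs ring (indexed by a running counter modulo max_send_wr), handle_single_completion() marks a slot completed, and generate_completions() then delivers completions strictly in posting order. The following is a simplified standalone model of that ring, not the driver code: the types are plain C stand-ins, and the real driver stores full struct ib_wc entries and forwards them with mlx5_ib_generate_wc().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SEND_WR 8U   /* stand-in for gsi->cap.max_send_wr */

struct fake_wc { uint64_t wr_id; };                 /* simplified ib_wc */
struct gsi_wr  { struct fake_wc wc; bool completed; };

struct gsi {
    struct gsi_wr outstanding_wrs[MAX_SEND_WR];
    unsigned int outstanding_pi;   /* producer index: sends posted */
    unsigned int outstanding_ci;   /* consumer index: completions delivered */
};

/* Reserve the next ring slot for a posted send; fails when the ring is full. */
static bool post_wr(struct gsi *gsi, uint64_t wr_id)
{
    if (gsi->outstanding_pi - gsi->outstanding_ci >= MAX_SEND_WR)
        return false;
    gsi->outstanding_wrs[gsi->outstanding_pi % MAX_SEND_WR] =
        (struct gsi_wr){ .wc = { .wr_id = wr_id }, .completed = false };
    gsi->outstanding_pi++;
    return true;
}

/* Deliver completions in posting order, stopping at the first slot that has
 * not completed yet (mirrors the generate_completions() loop shown above). */
static void generate_completions(struct gsi *gsi)
{
    while (gsi->outstanding_ci != gsi->outstanding_pi) {
        struct gsi_wr *wr =
            &gsi->outstanding_wrs[gsi->outstanding_ci % MAX_SEND_WR];

        if (!wr->completed)
            break;
        printf("completion for wr_id %llu\n",
               (unsigned long long)wr->wc.wr_id);
        wr->completed = false;
        gsi->outstanding_ci++;
    }
}

Completions that arrive out of order are simply held back until every earlier slot has been marked completed, which is the ordering guarantee the GSI QP needs.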
/kernel/linux/linux-5.10/fs/orangefs/inode.c
    in orangefs_writepage_locked():
        22   struct orangefs_write_range *wr = NULL;
        33   wr = (struct orangefs_write_range *)page_private(page);
        34   WARN_ON(wr->pos >= len);
        35   off = wr->pos;
        36   if (off + wr->len > len)
        39   wlen = wr->len;
        58   len, wr, NULL, NULL);
    in orangefs_writepages_work():
        93   struct orangefs_write_range *wrp, wr;
        121  wr.uid = ow->uid;
        122  wr…
    'wr' is also a local of orangefs_writepages_callback() (160), orangefs_write_begin() (361, 385),
    orangefs_invalidatepage() (451) and orangefs_page_mkwrite() (681).
    [more matches not shown]
/kernel/linux/linux-6.6/fs/orangefs/inode.c
    in orangefs_writepage_locked():
        23   struct orangefs_write_range *wr = NULL;
        34   wr = (struct orangefs_write_range *)page_private(page);
        35   WARN_ON(wr->pos >= len);
        36   off = wr->pos;
        37   if (off + wr->len > len)
        40   wlen = wr->len;
        57   len, wr, NULL, NULL);
    in orangefs_writepages_work():
        92   struct orangefs_write_range *wrp, wr;
        116  wr.uid = ow->uid;
        117  wr…
    155  struct orangefs_write_range *wr = folio->private;             (in orangefs_writepages_callback)
    412  struct orangefs_write_range *wr = folio_get_private(folio);   (in orangefs_invalidate_folio)
    'wr' is also a local of orangefs_write_begin() (321, 347) and orangefs_page_mkwrite() (640).
    [more matches not shown]
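In both orangefs copies, orangefs_writepage_locked() reads the tracked write range from the page/folio private data and clamps it against the current file size before issuing the I/O (off = wr->pos; if the range runs past len, only len - off bytes are written). A tiny standalone sketch of that clamp, with simplified types in place of struct orangefs_write_range:

#include <stdint.h>

struct write_range { uint64_t pos; uint64_t len; };   /* simplified orangefs_write_range */

/* How many bytes of 'wr' may be written given file size 'len';
 * 0 means the tracked range now lies entirely past EOF. */
static uint64_t clamp_write_range(const struct write_range *wr, uint64_t len)
{
    if (wr->pos >= len)
        return 0;
    if (wr->pos + wr->len > len)
        return len - wr->pos;
    return wr->len;
}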
/kernel/linux/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
    in set_reg_seg():
        655  const struct ib_reg_wr *wr)
        657  struct pvrdma_user_mr *mr = to_vmr(wr->mr);
        659  wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
        660  wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
        661  wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
        662  wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
        663  wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
        664  wqe_hdr->wr.fast_reg.access_flags = wr->access;
        665  wqe_hdr->wr…
    'wr' is also an argument of set_reg_seg() (654), pvrdma_post_send() (679) and pvrdma_post_recv() (889).
    [more matches not shown]
/kernel/linux/linux-5.10/drivers/infiniband/core/rw.c
    80   reg->inv_wr.next = &reg->reg_wr.wr;       (in rdma_rw_inv_key)
    111  reg->reg_wr.wr.opcode = IB_WR_REG_MR;     (in rdma_rw_init_one_mr)
    in rdma_rw_init_mr_wrs():
        151  prev->wr.wr.next = &reg->inv_wr;
        153  prev->wr.wr.next = &reg->reg_wr.wr;
        156  reg->reg_wr.wr.next = &reg->wr.wr;
    [more matches not shown]
/kernel/linux/linux-6.6/drivers/infiniband/core/rw.c
    81   reg->inv_wr.next = &reg->reg_wr.wr;       (in rdma_rw_inv_key)
    112  reg->reg_wr.wr.opcode = IB_WR_REG_MR;     (in rdma_rw_init_one_mr)
    in rdma_rw_init_mr_wrs():
        152  prev->wr.wr.next = &reg->inv_wr;
        154  prev->wr.wr.next = &reg->reg_wr.wr;
        157  reg->reg_wr.wr.next = &reg->wr.wr;
    [more matches not shown]
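The rw.c matches are all about chaining work requests through the ->next pointer: an optional IB_WR_LOCAL_INV, then the IB_WR_REG_MR registration, then the RDMA operation itself, posted as a single list. A hedged sketch of that chaining with the kernel verbs types follows; this is illustrative only, not the rdma_rw code itself, the helper name post_reg_then_write and the single-SGE setup are assumptions, and DMA mapping and error handling are omitted.

#include <rdma/ib_verbs.h>

/*
 * Chain [LOCAL_INV] -> REG_MR -> RDMA_WRITE and post the head of the list.
 * 'mr', 'sge', 'remote_addr' and 'rkey' are assumed to be prepared elsewhere.
 */
static int post_reg_then_write(struct ib_qp *qp, struct ib_mr *mr,
                               struct ib_sge *sge, u64 remote_addr, u32 rkey,
                               bool need_inval)
{
    struct ib_send_wr inv_wr = {}, *head;
    struct ib_reg_wr reg_wr = {};
    struct ib_rdma_wr rdma_wr = {};
    const struct ib_send_wr *bad_wr;

    /* Fast-register the MR, then chain the data transfer behind it. */
    reg_wr.wr.opcode = IB_WR_REG_MR;
    reg_wr.mr = mr;
    reg_wr.key = mr->rkey;
    reg_wr.access = IB_ACCESS_LOCAL_WRITE;
    reg_wr.wr.next = &rdma_wr.wr;

    rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
    rdma_wr.wr.sg_list = sge;
    rdma_wr.wr.num_sge = 1;
    rdma_wr.remote_addr = remote_addr;
    rdma_wr.rkey = rkey;

    if (need_inval) {
        /* Invalidate the stale rkey before re-registering, as rw.c does. */
        inv_wr.opcode = IB_WR_LOCAL_INV;
        inv_wr.ex.invalidate_rkey = mr->rkey;
        inv_wr.next = &reg_wr.wr;
        head = &inv_wr;
    } else {
        head = &reg_wr.wr;
    }

    return ib_post_send(qp, head, &bad_wr);
}

Posting the whole chain with one ib_post_send() keeps the registration and the transfer ordered on the send queue without extra round trips.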
/kernel/linux/linux-6.6/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
    in set_reg_seg():
        647  const struct ib_reg_wr *wr)
        649  struct pvrdma_user_mr *mr = to_vmr(wr->mr);
        651  wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
        652  wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
        653  wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
        654  wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
        655  wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
        656  wqe_hdr->wr.fast_reg.access_flags = wr->access;
        657  wqe_hdr->wr…
    'wr' is also an argument of set_reg_seg() (646), pvrdma_post_send() (671) and pvrdma_post_recv() (873).
    [more matches not shown]
/kernel/linux/linux-5.10/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
    in child():
        33   static void child(int size, int wr)
        35   volatile uint8_t *addr = &var[32 + wr];
    in run_test():
        112  static bool run_test(int wr_size, int wp_size, int wr, int wp)
        125  child(wr_size, wr);
    in main():
        204  int wr, wp, size;
        215  for (wr = 0; wr <= 32; wr = wr + size) {
        216  for (wp = wr…
    [more matches not shown]
/kernel/linux/linux-6.6/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
    in child():
        33   static void child(int size, int wr)
        35   volatile uint8_t *addr = &var[32 + wr];
    in run_test():
        112  static bool run_test(int wr_size, int wp_size, int wr, int wp)
        125  child(wr_size, wr);
    in main():
        204  int wr, wp, size;
        215  for (wr = 0; wr <= 32; wr = wr + size) {
        216  for (wp = wr…
    [more matches not shown]
/kernel/linux/linux-5.10/drivers/infiniband/ulp/iser/iser_memory.c
    in iser_reg_sig_mr():
        243  struct ib_reg_wr *wr = &tx_desc->reg_wr;
        254  iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
        266  memset(wr, 0, sizeof(*wr));
        267  wr->wr.next = &tx_desc->send_wr;
        268  wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
        269  wr…
    299  struct ib_reg_wr *wr = &tx_desc->reg_wr;   (in iser_fast_reg_mr)
    [more matches not shown]
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/gsi.c
    in generate_completions():
        51   struct mlx5_ib_gsi_wr *wr;
        56   wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
        58   if (!wr->completed)
        61   WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
        62   wr->completed = false;
    in handle_single_completion():
        71   struct mlx5_ib_gsi_wr *wr =
        78   wr->completed = true;
        79   wr_id = wr->wc.wr_id;
        80   wr->wc = *wc;
        81   wr…
    'wr' is also an argument of mlx5_ib_add_outstanding_wr() (359), mlx5_ib_gsi_silent_drop() (391),
    get_tx_qp() (411), mlx5_ib_gsi_post_send() (429) and mlx5_ib_gsi_post_recv() (474).
    [more matches not shown]

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/wr.c
    9    #include "wr.h"
    in set_eth_seg():
        54   static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
        61   if (wr->send_flags & IB_SEND_IP_CSUM)
        65   if (wr->opcode == IB_WR_LSO) {
        66   struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
    in set_datagram_seg():
        103  const struct ib_send_wr *wr)
        105  memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
        107  cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
        108  dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)…
    'wr' is also an argument of set_datagram_seg() (102), send_ieth() (228), set_data_inl_seg() (260),
    set_reg_wr() (635), begin_wqe() (748), handle_rdma_op() (787), handle_local_inv() (794),
    handle_reg_mr() (803), handle_psv() (812), handle_reg_mr_integrity() (843), handle_qpt_rc() (931),
    handle_qpt_uc() (982), handle_qpt_hw_gsi() (994), handle_qpt_ud() (1004),
    mlx5_ib_post_send() (1051) and mlx5_ib_post_recv() (1206), and a local of set_pi_umr_wr() (561).
    [more matches not shown]
/kernel/linux/linux-6.6/drivers/infiniband/ulp/iser/iser_memory.c
    in iser_reg_sig_mr():
        257  struct ib_reg_wr *wr = &tx_desc->reg_wr;
        268  iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
        280  memset(wr, 0, sizeof(*wr));
        281  wr->wr.next = &tx_desc->send_wr;
        282  wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
        283  wr…
    313  struct ib_reg_wr *wr = &tx_desc->reg_wr;   (in iser_fast_reg_mr)
    [more matches not shown]
/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/rxe_mw.c
    in rxe_check_bind_mw():
        83   if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
        117  if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
        123  if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
        124  ((wqe->wr.wr.mw.addr + wqe->wr.wr…
    [more matches not shown]
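rxe_check_bind_mw() in the listing above rejects a memory-window bind unless the requested range fits inside the underlying MR: the length must be non-zero and no larger than the MR, and [addr, addr + length) must lie within [iova, iova + mr_length). Below is a standalone sketch of just that containment check, with plain integer parameters; the driver additionally validates rkeys, MW/MR state and access flags.

#include <stdbool.h>
#include <stdint.h>

/* Does the window [addr, addr + len) fit inside the MR [iova, iova + mr_len)? */
static bool bind_range_ok(uint64_t addr, uint64_t len,
                          uint64_t iova, uint64_t mr_len)
{
    if (len == 0 || len > mr_len)
        return false;
    if (addr < iova)
        return false;
    /* Equivalent to addr + len <= iova + mr_len, written to avoid overflow. */
    return addr - iova <= mr_len - len;
}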