Lines Matching defs:rwq

815 struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
823 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
826 mlx5_ib_db_unmap_user(context, &rwq->db);
827 ib_umem_release(rwq->umem);
831 struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
845 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
846 if (IS_ERR(rwq->umem)) {
848 err = PTR_ERR(rwq->umem);
852 mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
855 &rwq->rq_page_offset);
861 rwq->rq_num_pas = ncont;
862 rwq->page_shift = page_shift;
863 rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
864 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
867 (unsigned long long)ucmd->buf_addr, rwq->buf_size,
870 err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
879 ib_umem_release(rwq->umem);
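
The block at 831-879 is create_user_rq(): pin the user receive buffer, derive its page geometry, and map the user doorbell record, releasing the umem on failure. A condensed sketch of that flow (the _sketch suffix marks it as a paraphrase; declarations and debug prints are reconstructed, not quoted):

static int create_user_rq_sketch(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                                 struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
                                 struct mlx5_ib_create_wq *ucmd)
{
    struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
            udata, struct mlx5_ib_ucontext, ibucontext);
    int page_shift, npages, ncont;
    int err;

    /* Pin the userspace receive buffer (line 845). */
    rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
    if (IS_ERR(rwq->umem))
        return PTR_ERR(rwq->umem);

    /* Derive page geometry for the HW translation table (lines 852-863). */
    mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
                       &ncont, NULL);
    err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
                                 &rwq->rq_page_offset);
    if (err)
        goto err_umem;

    rwq->rq_num_pas = ncont;
    rwq->page_shift = page_shift;
    rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
    rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

    /* Map the user doorbell record; unwind the umem pin on failure (870, 879). */
    err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
    if (err)
        goto err_umem;
    return 0;

err_umem:
    ib_umem_release(rwq->umem);
    return err;
}
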
4826 struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
4827 struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
4830 if (rwq->ibwq.event_handler) {
4831 event.device = rwq->ibwq.device;
4832 event.element.wq = &rwq->ibwq;
4842 rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
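
Lines 4826-4842 are the async event callback wired up at 5143. A sketch of the dispatch; the switch on the event type (lines 4833-4841 are not listed because they do not mention rwq) is assumed to map the catastrophic WQ error onto IB_EVENT_WQ_FATAL:

static void mlx5_ib_wq_event_sketch(struct mlx5_core_qp *core_qp, int type)
{
    struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
    struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
    struct ib_event event;

    if (rwq->ibwq.event_handler) {
        event.device     = rwq->ibwq.device;
        event.element.wq = &rwq->ibwq;
        switch (type) {
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
            event.event = IB_EVENT_WQ_FATAL;
            break;
        default:
            mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n",
                         type, core_qp->qpn);
            return;
        }
        /* Deliver the event to the consumer's handler with its context. */
        rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
    }
}
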
4867 static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
4881 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
4890 MLX5_SET(rqc, rqc, user_index, rwq->user_index);
4896 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
4907 MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
4908 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
4917 MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
4919 rwq->single_stride_log_num_of_bytes -
4922 fw_map[rwq->log_num_strides -
4925 MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
4927 MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
4928 MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
4929 MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
4930 MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
4959 mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
4960 err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
4966 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
4968 rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
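
Lines 4867-4968 build and fire the CREATE_RQ mailbox. A condensed sketch of the non-striding path; the striding-RQ branch (4908-4922), the delay-drop handling (4966-4968), and several rqc fields are omitted, and the exact field order is paraphrased:

static int create_rq_sketch(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
                            struct ib_wq_init_attr *init_attr)
{
    struct mlx5_ib_dev *dev = to_mdev(pd->device);
    __be64 *rq_pas0;
    void *in, *rqc, *wq;
    int inlen, err;

    /* Command length = fixed create_rq_in layout + one 64-bit PA per page (4881). */
    inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
    in = kvzalloc(inlen, GFP_KERNEL);
    if (!in)
        return -ENOMEM;

    rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
    MLX5_SET(rqc, rqc, user_index, rwq->user_index);
    MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);

    wq = MLX5_ADDR_OF(rqc, rqc, wq);
    MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
    MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
    MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
    MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
    MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
    MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
    MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
    MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);

    /* Copy the PA list of the pinned buffer, then create the tracked RQ (4959-4960). */
    rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
    mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
    err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);

    kvfree(in);
    return err;
}
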
4979 struct mlx5_ib_rwq *rwq)
4988 rwq->wqe_count = ucmd->rq_wqe_count;
4989 rwq->wqe_shift = ucmd->rq_wqe_shift;
4990 if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
4993 rwq->log_rq_stride = rwq->wqe_shift;
4994 rwq->log_rq_size = ilog2(rwq->wqe_count);
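
Lines 4979-4994 size the RQ from the user's request: the buffer size is wqe_count << wqe_shift, with check_shl_overflow() rejecting a shift that overflows. A sketch of that arithmetic (the capability checks against the device limits that sit between these lines are elided):

static int set_user_rq_size_sketch(struct mlx5_ib_dev *dev,
                                   struct ib_wq_init_attr *wq_init_attr,
                                   struct mlx5_ib_create_wq *ucmd,
                                   struct mlx5_ib_rwq *rwq)
{
    if (!ucmd->rq_wqe_count)
        return -EINVAL;

    rwq->wqe_count = ucmd->rq_wqe_count;
    rwq->wqe_shift = ucmd->rq_wqe_shift;
    /* buf_size = wqe_count << wqe_shift, rejected on overflow (4990). */
    if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
        return -EINVAL;

    rwq->log_rq_stride = rwq->wqe_shift;
    rwq->log_rq_size = ilog2(rwq->wqe_count);
    return 0;
}
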
5014 struct mlx5_ib_rwq *rwq)
5070 rwq->single_stride_log_num_of_bytes =
5072 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
5073 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
5074 rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
5077 err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
5083 err = create_user_rq(dev, pd, udata, rwq, &ucmd);
5089 rwq->user_index = ucmd.user_index;
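
Lines 5014-5089 are prepare_user_rq(): copy in the vendor command, optionally record the striding-RQ parameters (5070-5074), then size and pin the queue. A condensed sketch, assuming ib_copy_from_udata() for the command copy and the MLX5_IB_CREATE_WQ_STRIDING_RQ comp_mask bit gating the striding path; the striding capability and range validation is elided:

static int prepare_user_rq_sketch(struct ib_pd *pd,
                                  struct ib_wq_init_attr *init_attr,
                                  struct ib_udata *udata,
                                  struct mlx5_ib_rwq *rwq)
{
    struct mlx5_ib_dev *dev = to_mdev(pd->device);
    struct mlx5_ib_create_wq ucmd = {};
    int err;

    if (ib_copy_from_udata(&ucmd, udata, min(udata->inlen, sizeof(ucmd))))
        return -EFAULT;

    if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
        /* Stride geometry comes straight from the user command (5070-5074). */
        rwq->single_stride_log_num_of_bytes =
            ucmd.single_stride_log_num_of_bytes;
        rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
        rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
        rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
    }

    err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
    if (err)
        return err;

    err = create_user_rq(dev, pd, udata, rwq, &ucmd);
    if (err)
        return err;

    rwq->user_index = ucmd.user_index;
    return 0;
}
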
5098 struct mlx5_ib_rwq *rwq;
5117 rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
5118 if (!rwq)
5120 err = prepare_user_rq(pd, init_attr, udata, rwq);
5123 err = create_rq(rwq, pd, init_attr);
5133 rwq->ibwq.wq_num = rwq->core_qp.qpn;
5134 rwq->ibwq.state = IB_WQS_RESET;
5143 rwq->core_qp.event = mlx5_ib_wq_event;
5144 rwq->ibwq.event_handler = init_attr->event_handler;
5145 return &rwq->ibwq;
5148 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5150 destroy_user_rq(dev, pd, rwq, udata);
5152 kfree(rwq);
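
Lines 5098-5152 tie the pieces together in mlx5_ib_create_wq(): allocate the rwq, prepare the user RQ, create the firmware RQ object, and unwind in reverse order on failure. A sketch of that control flow; the init_attr validation at the top of the real function and the response copy back to udata (whose error path is the destroy at 5148) are elided:

struct ib_wq *mlx5_ib_create_wq_sketch(struct ib_pd *pd,
                                       struct ib_wq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
    struct mlx5_ib_dev *dev = to_mdev(pd->device);
    struct mlx5_ib_rwq *rwq;
    int err;

    rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
    if (!rwq)
        return ERR_PTR(-ENOMEM);

    err = prepare_user_rq(pd, init_attr, udata, rwq);
    if (err)
        goto err_free;

    err = create_rq(rwq, pd, init_attr);
    if (err)
        goto err_user_rq;

    /* Expose the firmware RQ number as the uverbs WQ number (5133-5134). */
    rwq->ibwq.wq_num = rwq->core_qp.qpn;
    rwq->ibwq.state = IB_WQS_RESET;

    /* Hook the async event path before handing the WQ to the caller (5143-5144). */
    rwq->core_qp.event = mlx5_ib_wq_event;
    rwq->ibwq.event_handler = init_attr->event_handler;
    return &rwq->ibwq;

err_user_rq:
    destroy_user_rq(dev, pd, rwq, udata);
err_free:
    kfree(rwq);
    return ERR_PTR(err);
}
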
5159 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5162 ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5165 destroy_user_rq(dev, wq->pd, rwq, udata);
5166 kfree(rwq);
5254 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5332 err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
5334 rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
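
Lines 5254-5334 come from mlx5_ib_modify_wq(), which translates the verbs WQ state change into a MODIFY_RQ command. A minimal sketch of just the state transition, with curr_wq_state/wq_state mirroring the locals used around 5332; the vlan-stripping and counter-set modifications the full function also handles are left out:

static int modify_wq_state_sketch(struct mlx5_ib_dev *dev,
                                  struct mlx5_ib_rwq *rwq,
                                  int curr_wq_state, int wq_state)
{
    void *in, *rqc;
    int err;

    in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rq_in), GFP_KERNEL);
    if (!in)
        return -ENOMEM;

    rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

    /* RESET and RDY share encodings between IB and RQC; ERR does not. */
    if (curr_wq_state == IB_WQS_ERR)
        curr_wq_state = MLX5_RQC_STATE_ERR;
    if (wq_state == IB_WQS_ERR)
        wq_state = MLX5_RQC_STATE_ERR;

    MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
    MLX5_SET(rqc, rqc, state, wq_state);

    err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
    if (!err)
        /* Mirror line 5334: report ERR as IB_WQS_ERR, else the RQC state. */
        rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ?
                          IB_WQS_ERR : wq_state;

    kvfree(in);
    return err;
}
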