Lines Matching defs:rwq (each match below is prefixed with its line number in the source file)

865 			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
873 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
876 mlx5_ib_db_unmap_user(context, &rwq->db);
877 ib_umem_release(rwq->umem);
881 struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
893 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
894 if (IS_ERR(rwq->umem)) {
896 err = PTR_ERR(rwq->umem);
901 rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
902 page_offset, 64, &rwq->rq_page_offset);
909 rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
910 rwq->page_shift = order_base_2(page_size);
911 rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
912 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
917 (unsigned long long)ucmd->buf_addr, rwq->buf_size,
918 ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
921 err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
930 ib_umem_release(rwq->umem);
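
The matches at 881-930 appear to come from create_user_rq(): the user buffer is pinned with ib_umem_get(), a DMA page size that the WQ context's page_offset/log_wq_pg_sz fields can describe is chosen, and the user doorbell record is mapped. A minimal sketch assembling those lines in order; the local declarations, the debug print, and the exact error labels are abridged or assumed rather than taken from the matches:

	/* Sketch: pin the user RQ buffer and map its doorbell record.
	 * 'dev', 'ucmd', 'ucontext', 'page_size' and 'err' are assumed
	 * parameters/locals of the surrounding function. */
	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
	if (IS_ERR(rwq->umem))
		return PTR_ERR(rwq->umem);

	/* Pick the largest page size whose quantized offset fits the WQ
	 * context's page_offset field; the result also fills rq_page_offset. */
	page_size = mlx5_umem_find_best_quantized_pgoff(
		rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &rwq->rq_page_offset);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
	rwq->page_shift = order_base_2(page_size);
	rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);

	/* Map the user-space doorbell record for kernel access. */
	err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
	if (err)
		goto err_umem;
	return 0;

err_umem:
	ib_umem_release(rwq->umem);
	return err;
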
5153 struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
5154 struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
5157 if (rwq->ibwq.event_handler) {
5158 event.device = rwq->ibwq.device;
5159 event.element.wq = &rwq->ibwq;
5169 rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
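
The matches at 5153-5169 look like the WQ async-event handler (mlx5_ib_wq_event, per the assignment at 5476 below): a firmware event on the tracked RQ is translated into an ib_event and forwarded to the consumer's handler. A sketch of that dispatch; the event-type switch is reduced to the fatal case, which is an assumption about the cases the real handler distinguishes:

	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct ib_event event;

	if (rwq->ibwq.event_handler) {
		event.device = rwq->ibwq.device;
		event.element.wq = &rwq->ibwq;
		event.event = IB_EVENT_WQ_FATAL;	/* assumed translation of the firmware event type */
		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
	}
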
5194 static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
5213 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
5223 MLX5_SET(rqc, rqc, user_index, rwq->user_index);
5229 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
5240 MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
5241 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
5250 MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
5252 rwq->single_stride_log_num_of_bytes -
5255 fw_map[rwq->log_num_strides -
5258 MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
5260 MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
5261 MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
5262 MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
5263 MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
5292 mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
5293 err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
5299 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5301 rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
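
The matches at 5194-5301 appear to be create_rq(), which builds the CREATE_RQ mailbox from the rwq state: RQC fields, WQ geometry (stride, size, page offset and log page size, signature, doorbell DMA address), the PAS array filled from the umem, and finally the tracked core call. A condensed sketch of that construction; the kvzalloc/MLX5_ADDR_OF plumbing and the striding-RQ and delay-drop branches are assumed from the usual mailbox pattern rather than taken from the matched lines:

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, user_index, rwq->user_index);

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);

	/* Copy the DMA addresses of the user buffer into the PAS array. */
	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);

	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
	kvfree(in);
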
5312 struct mlx5_ib_rwq *rwq)
5321 rwq->wqe_count = ucmd->rq_wqe_count;
5322 rwq->wqe_shift = ucmd->rq_wqe_shift;
5323 if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
5326 rwq->log_rq_stride = rwq->wqe_shift;
5327 rwq->log_rq_size = ilog2(rwq->wqe_count);
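
The matches at 5312-5327 appear to come from set_user_rq_size(), which derives the RQ geometry from the user command: the buffer size is wqe_count shifted by wqe_shift, rejected if the shift overflows, and the log stride/size are later programmed into the WQ context. A sketch, with the surrounding validation of the user-supplied values assumed and elided:

	rwq->wqe_count = ucmd->rq_wqe_count;
	rwq->wqe_shift = ucmd->rq_wqe_shift;
	/* buf_size = wqe_count << wqe_shift, rejected if it overflows. */
	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
		return -EINVAL;
	rwq->log_rq_stride = rwq->wqe_shift;
	rwq->log_rq_size = ilog2(rwq->wqe_count);
	return 0;
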
5347 struct mlx5_ib_rwq *rwq)
5403 rwq->single_stride_log_num_of_bytes =
5405 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
5406 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
5407 rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
5410 err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
5416 err = create_user_rq(dev, pd, udata, rwq, &ucmd);
5422 rwq->user_index = ucmd.user_index;
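
The matches at 5347-5422 look like prepare_user_rq(): it copies in the user command, records the striding-RQ parameters when the user requests a striding RQ, sizes the RQ, and creates the user mapping before storing the user index. A sketch of that sequence; the guard bit (MLX5_IB_CREATE_WQ_STRIDING_RQ in comp_mask) and the validation of the log values are assumptions, not visible in the matched lines:

	if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {	/* assumed guard */
		rwq->single_stride_log_num_of_bytes =
			ucmd.single_stride_log_num_of_bytes;
		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
	}

	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
	if (err)
		return err;

	err = create_user_rq(dev, pd, udata, rwq, &ucmd);
	if (err)
		return err;

	rwq->user_index = ucmd.user_index;
	return 0;
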
5431 struct mlx5_ib_rwq *rwq;
5450 rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
5451 if (!rwq)
5453 err = prepare_user_rq(pd, init_attr, udata, rwq);
5456 err = create_rq(rwq, pd, init_attr);
5466 rwq->ibwq.wq_num = rwq->core_qp.qpn;
5467 rwq->ibwq.state = IB_WQS_RESET;
5476 rwq->core_qp.event = mlx5_ib_wq_event;
5477 rwq->ibwq.event_handler = init_attr->event_handler;
5478 return &rwq->ibwq;
5481 mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5483 destroy_user_rq(dev, pd, rwq, udata);
5485 kfree(rwq);
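
The matches at 5431-5485 appear to be the ib_wq creation entry point (mlx5_ib_create_wq, judging by the returned &rwq->ibwq): allocate the rwq, prepare the user RQ, create the hardware RQ, publish the firmware RQ number and RESET state, and hook up the async event handler, unwinding in reverse order on failure. A condensed sketch; the response copy to user space that the real path also performs (and whose failure triggers the destroy at 5481) is abridged:

	rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
	if (!rwq)
		return ERR_PTR(-ENOMEM);

	err = prepare_user_rq(pd, init_attr, udata, rwq);
	if (err)
		goto err;

	err = create_rq(rwq, pd, init_attr);
	if (err)
		goto err_user_rq;

	/* Expose the firmware RQ number as the uverbs WQ number. */
	rwq->ibwq.wq_num = rwq->core_qp.qpn;
	rwq->ibwq.state = IB_WQS_RESET;

	rwq->core_qp.event = mlx5_ib_wq_event;
	rwq->ibwq.event_handler = init_attr->event_handler;
	return &rwq->ibwq;

err_user_rq:
	destroy_user_rq(dev, pd, rwq, udata);
err:
	kfree(rwq);
	return ERR_PTR(err);
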
5492 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5495 ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5498 destroy_user_rq(dev, wq->pd, rwq, udata);
5499 kfree(rwq);
5587 struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5664 err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
5666 rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
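
The matches at 5587-5666 appear to be the WQ modify path (mlx5_ib_modify_wq): the state change is issued as a MODIFY_RQ command on the tracked RQ and, on success, the cached ibwq.state is updated, with the firmware error state mapped back to IB_WQS_ERR. A minimal sketch of the tail of that flow; the mailbox setup and the other attributes the real function can modify are omitted:

	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
	kvfree(in);
	if (err)
		return err;

	/* Cache the new state; the firmware ERR state maps to IB_WQS_ERR. */
	rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
	return 0;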