Lines matching refs:srq in drivers/infiniband/hw/mlx5/srq.c; each match is prefixed with its source line number.

12 #include "srq.h"
14 static void *get_wqe(struct mlx5_ib_srq *srq, int n)
16 return mlx5_frag_buf_get_wqe(&srq->fbc, n);
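get_wqe() resolves entry n inside a fragmented queue buffer through the frag-buf control structure (srq->fbc). The real mlx5_frag_buf_get_wqe() keeps precomputed masks and shifts in mlx5_frag_buf_ctrl; the user-space model below only illustrates the two-step lookup (pick a fragment, then an offset inside it), and every name in it is invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative model, not the kernel's mlx5_frag_buf_ctrl: the queue
     * is split across equal-sized fragments, and entry ix is found by
     * selecting a fragment, then an offset within it. */
    struct frag_buf_model {
        void **frags;                /* one allocation per fragment */
        uint8_t log_stride;          /* log2 of the WQE size in bytes */
        uint8_t log_strides_per_frag;
    };

    static void *model_get_wqe(struct frag_buf_model *fbc, uint32_t ix)
    {
        uint32_t frag = ix >> fbc->log_strides_per_frag;
        uint32_t off = (ix & ((1u << fbc->log_strides_per_frag) - 1))
                       << fbc->log_stride;

        return (char *)fbc->frags[frag] + off;
    }

    int main(void)
    {
        void *frags[2] = { malloc(4096), malloc(4096) };
        struct frag_buf_model fbc = {
            .frags = frags,
            .log_stride = 6,           /* 64-byte WQEs */
            .log_strides_per_frag = 6, /* 64 WQEs per 4K fragment */
        };

        /* WQE 65 lands at offset 64 of the second fragment. */
        printf("delta=%td\n",
               (char *)model_get_wqe(&fbc, 65) - (char *)frags[1]);
        return 0;
    }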
19 static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
22 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
26 event.element.srq = ibsrq;
36 type, srq->srqn);
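mlx5_ib_srq_event() is the upcall the core layer invokes on hardware SRQ events: it wraps the event in an ib_event (setting event.element.srq to the verbs-level object) and, for unrecognized types, logs the type together with srq->srqn. Line 310 below wires this handler into srq->msrq.event. A stand-alone sketch of that callback pattern, with invented stand-in types:

    #include <stdio.h>

    /* Stand-in types; in the driver, the core object's event pointer is
     * set to mlx5_ib_srq_event() so hardware events can be fanned out. */
    enum model_event { MODEL_SRQ_LIMIT, MODEL_SRQ_ERROR, MODEL_UNKNOWN };

    struct model_core_srq {
        unsigned int srqn;
        void (*event)(struct model_core_srq *srq, enum model_event type);
    };

    static void model_srq_event(struct model_core_srq *srq,
                                enum model_event type)
    {
        switch (type) {
        case MODEL_SRQ_LIMIT:
        case MODEL_SRQ_ERROR:
            printf("dispatch event %d for SRQ 0x%x\n", type, srq->srqn);
            break;
        default:
            fprintf(stderr, "unexpected event %d on SRQ 0x%x\n",
                    type, srq->srqn);
        }
    }

    int main(void)
    {
        struct model_core_srq msrq = {
            .srqn = 0x2a,
            .event = model_srq_event,  /* core -> consumer upcall */
        };

        msrq.event(&msrq, MODEL_SRQ_LIMIT);
        msrq.event(&msrq, MODEL_UNKNOWN);
        return 0;
    }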
44 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
81 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
83 srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
84 if (IS_ERR(srq->umem)) {
86 err = PTR_ERR(srq->umem);
90 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
105 mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
107 err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
126 ib_umem_release(srq->umem);
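create_srq_user() pins the user-provided buffer (ib_umem_get), translates it into a page-address array for the firmware (mlx5_ib_populate_pas), and maps the user doorbell record; on failure it unwinds in reverse, releasing the umem. The resource handling follows the kernel's goto-unwind idiom, sketched here with hypothetical acquire/release stubs:

    #include <stdio.h>

    /* Hypothetical stand-ins for the steps in create_srq_user(). */
    static int pin_user_buffer(void)    { puts("pin buffer"); return 0; }
    static void unpin_user_buffer(void) { puts("release umem"); }
    static int map_user_doorbell(void)  { return -1; /* force error path */ }

    static int create_srq_user_model(void)
    {
        int err;

        err = pin_user_buffer();       /* ib_umem_get() in the driver */
        if (err)
            return err;

        err = map_user_doorbell();     /* mlx5_ib_db_map_user() */
        if (err)
            goto err_umem;             /* unwind in reverse order */

        return 0;

    err_umem:
        unpin_user_buffer();           /* ib_umem_release() */
        return err;
    }

    int main(void)
    {
        return create_srq_user_model() ? 1 : 0;
    }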
131 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
138 err = mlx5_db_alloc(dev->mdev, &srq->db);
144 if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
151 mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
152 &srq->fbc);
154 srq->head = 0;
155 srq->tail = srq->msrq.max - 1;
156 srq->wqe_ctr = 0;
158 for (i = 0; i < srq->msrq.max; i++) {
159 next = get_wqe(srq, i);
161 cpu_to_be16((i + 1) & (srq->msrq.max - 1));
164 mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
165 in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
170 mlx5_fill_page_frag_array(&srq->buf, in->pas);
172 srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
173 if (!srq->wrid) {
177 srq->wq_sig = 0;
179 in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
190 mlx5_frag_buf_free(dev->mdev, &srq->buf);
193 mlx5_db_free(dev->mdev, &srq->db);
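create_srq_kernel() allocates the doorbell record and the (possibly fragmented) queue buffer, then chains every WQE's next_wqe_index to its successor, so the whole ring starts out as one circular free list with head = 0 and tail = max - 1. A runnable miniature of that initialization, using host byte order instead of the hardware's big-endian fields:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX 8   /* srq->msrq.max: always a power of two */

    /* First field of each WQE, mirroring
     * mlx5_wqe_srq_next_seg.next_wqe_index (big-endian on the wire,
     * host order here for simplicity). */
    struct stub_wqe { uint16_t next_wqe_index; };

    int main(void)
    {
        struct stub_wqe wq[MAX];
        int i;

        /* Link each entry to its successor, wrapping with a mask, so
         * the buffer forms one circular free list. */
        for (i = 0; i < MAX; i++)
            wq[i].next_wqe_index = (i + 1) & (MAX - 1);

        for (i = 0; i < MAX; i++)
            printf("wqe %d -> %u\n", i, (unsigned)wq[i].next_wqe_index);
        return 0;
    }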
197 static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
205 &srq->db);
206 ib_umem_release(srq->umem);
210 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
212 kvfree(srq->wrid);
213 mlx5_frag_buf_free(dev->mdev, &srq->buf);
214 mlx5_db_free(dev->mdev, &srq->db);
222 struct mlx5_ib_srq *srq = to_msrq(ib_srq);
237 mutex_init(&srq->mutex);
238 spin_lock_init(&srq->lock);
239 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
240 srq->msrq.max_gs = init_attr->attr.max_sge;
243 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
244 if (desc_size == 0 || srq->msrq.max_gs > desc_size)
252 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
254 srq->msrq.wqe_shift = ilog2(desc_size);
255 buf_size = srq->msrq.max * desc_size;
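The sizing arithmetic above: the ring gets one slot more than the requested max_wr and is rounded up to a power of two (so index wrap-around can use a mask), and each descriptor covers one next segment plus max_gs scatter segments. A user-space rehearsal of that math follows; the 16-byte segment sizes match the mlx5 wire format but are hard-coded illustrative constants here, and the full driver additionally clamps desc_size against device limits before taking ilog2:

    #include <stdio.h>

    #define NEXT_SEG_SIZE 16u   /* sizeof(struct mlx5_wqe_srq_next_seg) */
    #define DATA_SEG_SIZE 16u   /* sizeof(struct mlx5_wqe_data_seg) */

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    static unsigned int ilog2(unsigned int n)
    {
        unsigned int l = 0;

        while (n >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned int max_wr = 100, max_gs = 4;
        unsigned int max = roundup_pow_of_two(max_wr + 1);      /* 128 */
        unsigned int desc_size =
            roundup_pow_of_two(NEXT_SEG_SIZE + max_gs * DATA_SEG_SIZE);
        unsigned int wqe_shift = ilog2(desc_size);
        unsigned int buf_size = max * desc_size;

        printf("max=%u desc_size=%u wqe_shift=%u buf_size=%u\n",
               max, desc_size, wqe_shift, buf_size);
        return 0;
    }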
262 err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
264 err = create_srq_kernel(dev, srq, &in, buf_size);
267 mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
272 in.log_size = ilog2(srq->msrq.max);
273 in.wqe_shift = srq->msrq.wqe_shift - 4;
274 if (srq->wq_sig)
300 in.db_record = srq->db.dma;
301 err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
308 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
310 srq->msrq.event = mlx5_ib_srq_event;
311 srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
315 .srqn = srq->msrq.srqn,
326 init_attr->attr.max_wr = srq->msrq.max - 1;
331 mlx5_cmd_destroy_srq(dev, &srq->msrq);
335 destroy_srq_user(ib_srq->pd, srq, udata);
337 destroy_srq_kernel(dev, srq);
346 struct mlx5_ib_srq *srq = to_msrq(ibsrq);
354 if (attr->srq_limit >= srq->msrq.max)
357 mutex_lock(&srq->mutex);
358 ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
359 mutex_unlock(&srq->mutex);
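mlx5_ib_modify_srq() arms the SRQ limit: the requested watermark must be strictly below the ring size, and the firmware command runs under srq->mutex (a sleepable slow path) rather than under the posting spinlock. A minimal sketch of the bound check, with stub types standing in for the driver structures:

    #include <stdio.h>
    #include <errno.h>

    struct model_srq { unsigned int max; unsigned int limit; };

    static int model_arm_srq(struct model_srq *srq, unsigned int srq_limit)
    {
        if (srq_limit >= srq->max)
            return -EINVAL;    /* same bound the driver enforces */

        /* In the driver this is mlx5_cmd_arm_srq() under srq->mutex. */
        srq->limit = srq_limit;
        return 0;
    }

    int main(void)
    {
        struct model_srq s = { .max = 128, .limit = 0 };

        printf("arm(64)  -> %d\n", model_arm_srq(&s, 64));
        printf("arm(128) -> %d\n", model_arm_srq(&s, 128)); /* rejected */
        return 0;
    }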
371 struct mlx5_ib_srq *srq = to_msrq(ibsrq);
379 ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
384 srq_attr->max_wr = srq->msrq.max - 1;
385 srq_attr->max_sge = srq->msrq.max_gs;
392 int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
394 struct mlx5_ib_dev *dev = to_mdev(srq->device);
395 struct mlx5_ib_srq *msrq = to_msrq(srq);
403 destroy_srq_user(srq->pd, msrq, udata);
409 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
414 spin_lock(&srq->lock);
416 next = get_wqe(srq, srq->tail);
418 srq->tail = wqe_index;
420 spin_unlock(&srq->lock);
426 struct mlx5_ib_srq *srq = to_msrq(ibsrq);
436 spin_lock_irqsave(&srq->lock, flags);
445 if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
451 if (unlikely(srq->head == srq->tail)) {
457 srq->wrid[srq->head] = wr->wr_id;
459 next = get_wqe(srq, srq->head);
460 srq->head = be16_to_cpu(next->next_wqe_index);
469 if (i < srq->msrq.max_avail_gather) {
477 srq->wqe_ctr += nreq;
484 *srq->db.db = cpu_to_be32(srq->wqe_ctr);
487 spin_unlock_irqrestore(&srq->lock, flags);
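Taken together, mlx5_ib_post_srq_recv() and mlx5_ib_free_srq_wqe() drive the free list built in create_srq_kernel(): posting consumes the entry at head (recording wr_id in srq->wrid, advancing head through next_wqe_index, and publishing the new wqe_ctr via the doorbell record), while completion returns the entry by linking it behind tail. head == tail means only the reserved slot remains, i.e. the ring is full, which is why max_wr is reported back as max - 1. A runnable model of both paths, without locking or byte swapping and with invented names; the driver also batches the doorbell write once per post call rather than per WQE:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX 8   /* power of two, as in the driver */

    struct stub_wqe { uint16_t next_wqe_index; };

    struct model_srq {
        struct stub_wqe wq[MAX];
        uint64_t wrid[MAX];        /* completion cookies */
        int head, tail;
        uint32_t wqe_ctr;
    };

    static void model_init(struct model_srq *s)
    {
        int i;

        s->head = 0;
        s->tail = MAX - 1;
        s->wqe_ctr = 0;
        for (i = 0; i < MAX; i++)  /* same chain as the earlier miniature */
            s->wq[i].next_wqe_index = (i + 1) & (MAX - 1);
    }

    static int model_post(struct model_srq *s, uint64_t wr_id)
    {
        if (s->head == s->tail)
            return -1;             /* SRQ full */

        s->wrid[s->head] = wr_id;
        s->head = s->wq[s->head].next_wqe_index;
        s->wqe_ctr++;              /* doorbell record update goes here */
        return 0;
    }

    static void model_free(struct model_srq *s, int wqe_index)
    {
        /* Hook the completed WQE back in behind the current tail. */
        s->wq[s->tail].next_wqe_index = (uint16_t)wqe_index;
        s->tail = wqe_index;
    }

    int main(void)
    {
        struct model_srq s;
        int i;

        model_init(&s);
        for (i = 0; model_post(&s, 1000 + i) == 0; i++)
            ;
        printf("posted %d WQEs before the ring filled\n", i); /* MAX - 1 */
        model_free(&s, 0);
        printf("after one free, post %s\n",
               model_post(&s, 2000) == 0 ? "succeeds" : "fails");
        return 0;
    }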