Lines Matching refs:ucmd
434 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
452 if (ucmd) {
453 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
454 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
456 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
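Note: the matches at lines 452-456 are where the user-supplied RQ geometry is taken over from the create command. Below is a minimal, self-contained sketch of that guard, using plain uint32_t stand-ins instead of the driver's struct mlx5_ib_qp / struct mlx5_ib_create_qp fields; it illustrates the pattern and is not the driver code itself.

#include <errno.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* Hypothetical helper mirroring the lines 452-456 pattern: accept the
 * user's WQE count, but cap the user-controlled shift before anything
 * later computes "1 << wqe_shift". */
static int sketch_take_user_rq(uint32_t rq_wqe_count, uint32_t rq_wqe_shift,
                               uint32_t *wqe_cnt, uint32_t *wqe_shift)
{
        *wqe_cnt = rq_wqe_count;
        /* rq_wqe_shift is 32 bits wide, so anything above 32 is bogus */
        if (rq_wqe_shift > BITS_PER_BYTE * sizeof(rq_wqe_shift))
                return -EINVAL;
        *wqe_shift = rq_wqe_shift;
        return 0;
}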
638 struct mlx5_ib_create_qp *ucmd,
650 if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
652 ucmd->sq_wqe_count);
656 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
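Note: lines 650-656 (together with the caps at lines 2113 and 2280) show the two constraints on the user-requested SQ depth: it must be a power of two and must not exceed the device's log_max_qp_sz capability. A hedged, self-contained sketch of those checks, with the capability passed in as a plain integer rather than read through MLX5_CAP_GEN():

#include <errno.h>
#include <stdint.h>

static int sketch_check_sq_wqe_count(uint32_t sq_wqe_count,
                                     unsigned int log_max_qp_sz)
{
        /* a non-zero depth must be a power of two (cf. lines 650-656) */
        if (sq_wqe_count && (sq_wqe_count & (sq_wqe_count - 1)))
                return -EINVAL;

        /* and must stay within the device limit (cf. lines 2113, 2280) */
        if (sq_wqe_count > (1u << log_max_qp_sz))
                return -EINVAL;

        return 0;
}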
882 struct mlx5_ib_create_wq *ucmd)
890 if (!ucmd->buf_addr)
893 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
912 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
917 (unsigned long long)ucmd->buf_addr, rwq->buf_size,
921 err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
946 struct mlx5_ib_create_qp *ucmd)
967 uar_index = ucmd->bfreg_index;
972 ucmd->bfreg_index, true);
997 err = set_user_buf_size(dev, qp, ucmd, base, attr);
1001 if (ucmd->buf_addr && ubuffer->buf_size) {
1002 ubuffer->buf_addr = ucmd->buf_addr;
1047 err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
1708 void *ucmd;
1720 struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
1736 if (ucmd->comp_mask) {
1741 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
1742 !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
1773 if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1778 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
1783 switch (ucmd->rx_hash_function) {
1789 if (len != ucmd->rx_key_len) {
1795 memcpy(rss_key, ucmd->rx_hash_key, len);
1803 if (!ucmd->rx_hash_fields_mask) {
1811 if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1812 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
1813 ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1814 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
1820 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1821 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
1824 else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1825 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1829 outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1830 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1832 ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1833 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1835 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
1844 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1845 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1848 else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1849 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1853 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1854 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
1857 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
1858 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1861 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1862 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
1865 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
1866 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1869 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
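Note: the run of matches from line 1811 to line 1870 enforces two mutual-exclusion rules on ucmd->rx_hash_fields_mask before it is translated into TIR hash fields: IPv4 and IPv6 selections may not be mixed, and at most one of TCP ports, UDP ports, or the IPsec SPI may be chosen. The self-contained sketch below reproduces just that validation; the flag values are placeholders, the authoritative definitions live in include/uapi/rdma/mlx5-abi.h.

#include <errno.h>
#include <stdint.h>

enum {                          /* placeholder bit positions */
        RX_HASH_SRC_IPV4     = 1u << 0,
        RX_HASH_DST_IPV4     = 1u << 1,
        RX_HASH_SRC_IPV6     = 1u << 2,
        RX_HASH_DST_IPV6     = 1u << 3,
        RX_HASH_SRC_PORT_TCP = 1u << 4,
        RX_HASH_DST_PORT_TCP = 1u << 5,
        RX_HASH_SRC_PORT_UDP = 1u << 6,
        RX_HASH_DST_PORT_UDP = 1u << 7,
        RX_HASH_IPSEC_SPI    = 1u << 8,
};

static int sketch_check_rx_hash_fields(uint32_t mask)
{
        unsigned int ipv4 = !!(mask & (RX_HASH_SRC_IPV4 | RX_HASH_DST_IPV4));
        unsigned int ipv6 = !!(mask & (RX_HASH_SRC_IPV6 | RX_HASH_DST_IPV6));
        unsigned int l4;

        /* L3: IPv4 and IPv6 hashing are exclusive (cf. lines 1811-1814) */
        if (ipv4 && ipv6)
                return -EINVAL;

        /* L4: at most one of TCP, UDP, IPsec SPI (cf. lines 1829-1845) */
        l4 = !!(mask & (RX_HASH_SRC_PORT_TCP | RX_HASH_DST_PORT_TCP)) |
             !!(mask & (RX_HASH_SRC_PORT_UDP | RX_HASH_DST_PORT_UDP)) << 1 |
             !!(mask & RX_HASH_IPSEC_SPI) << 2;
        if (l4 & (l4 - 1))
                return -EINVAL;

        return 0;
}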
2073 struct mlx5_ib_create_qp *ucmd = params->ucmd;
2103 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2109 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
2110 ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2113 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
2123 &inlen, base, ucmd);
2128 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
2152 ucmd->dci_streams.log_num_concurent);
2154 ucmd->dci_streams.log_num_errored);
2234 struct mlx5_ib_create_qp *ucmd = params->ucmd;
2270 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2276 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
2277 ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2280 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
2291 &inlen, base, ucmd);
2299 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
2392 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
2734 struct mlx5_ib_create_qp *ucmd = params->ucmd;
2750 MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
2753 MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
2873 void *ucmd, struct ib_qp_init_attr *attr)
2880 flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
2882 flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
3046 size_t ucmd = sizeof(struct mlx5_ib_create_qp);
3052 params->ucmd_size = ucmd;
3062 params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
3073 ucmd = sizeof(struct mlx5_ib_create_qp_rss);
3074 params->ucmd_size = ucmd;
3075 if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
3078 params->inlen = min(ucmd, inlen);
3177 struct mlx5_ib_create_qp *ucmd = params->ucmd;
3185 return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
3235 "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
3272 params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
3273 if (!params.ucmd)
3276 err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
3284 err = process_vendor_flags(dev, qp, params.ucmd, attr);
3304 kfree(params.ucmd);
3305 params.ucmd = NULL;
3332 kfree(params.ucmd);
4106 const struct mlx5_ib_modify_qp *ucmd,
4352 if (ucmd->burst_info.max_burst_sz) {
4356 ucmd->burst_info.max_burst_sz;
4363 if (ucmd->burst_info.typical_pkt_sz) {
4367 ucmd->burst_info.typical_pkt_sz;
4383 ucmd->ece_options : 0;
4492 int attr_mask, struct mlx5_ib_modify_qp *ucmd,
4509 if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
4516 MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
4660 struct mlx5_ib_modify_qp ucmd = {};
4675 if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
4678 if (udata->inlen > sizeof(ucmd) &&
4679 !ib_is_udata_cleared(udata, sizeof(ucmd),
4680 udata->inlen - sizeof(ucmd)))
4683 if (ib_copy_from_udata(&ucmd, udata,
4684 min(udata->inlen, sizeof(ucmd))))
4687 if (ucmd.comp_mask ||
4688 memchr_inv(&ucmd.burst_info.reserved, 0,
4689 sizeof(ucmd.burst_info.reserved)))
4700 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4750 new_state, &ucmd, &resp, udata);
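Note: the matches at lines 4675-4689 show the usual uverbs forward/backward compatibility handling of ucmd: a request shorter than the fields the kernel needs is rejected, a request longer than the kernel's struct is accepted only if the extra bytes are cleared (ib_is_udata_cleared), the copy length is min(udata->inlen, sizeof(ucmd)), and unknown comp_mask bits or non-zero reserved bytes (memchr_inv) are refused. A self-contained illustration of that policy, using a stand-in struct and plain memory operations instead of the ib_* helpers:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct sketch_modify_qp {               /* stand-in, not the real uapi struct */
        uint32_t comp_mask;
        uint32_t ece_options;
};

#define sketch_offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

static int sketch_copy_ucmd(struct sketch_modify_qp *ucmd,
                            const void *udata, size_t inlen)
{
        /* too short: the fields we require are not present at all */
        if (inlen < sketch_offsetofend(struct sketch_modify_qp, ece_options))
                return -EINVAL;

        /* longer than we know about: every trailing byte must be zero */
        if (inlen > sizeof(*ucmd)) {
                const uint8_t *tail = (const uint8_t *)udata + sizeof(*ucmd);
                size_t i;

                for (i = 0; i < inlen - sizeof(*ucmd); i++)
                        if (tail[i])
                                return -EOPNOTSUPP;
        }

        memset(ucmd, 0, sizeof(*ucmd));
        memcpy(ucmd, udata, inlen < sizeof(*ucmd) ? inlen : sizeof(*ucmd));

        /* unknown extension bits are refused as well */
        if (ucmd->comp_mask)
                return -EOPNOTSUPP;
        return 0;
}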
5311 struct mlx5_ib_create_wq *ucmd,
5318 if (!ucmd->rq_wqe_count)
5321 rwq->wqe_count = ucmd->rq_wqe_count;
5322 rwq->wqe_shift = ucmd->rq_wqe_shift;
5350 struct mlx5_ib_create_wq ucmd = {};
5361 if (udata->inlen > sizeof(ucmd) &&
5362 !ib_is_udata_cleared(udata, sizeof(ucmd),
5363 udata->inlen - sizeof(ucmd))) {
5368 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
5373 if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
5376 } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
5381 if ((ucmd.single_stride_log_num_of_bytes <
5383 (ucmd.single_stride_log_num_of_bytes >
5386 ucmd.single_stride_log_num_of_bytes,
5392 ucmd.single_wqe_log_num_of_strides)) {
5396 ucmd.single_wqe_log_num_of_strides,
5404 ucmd.single_stride_log_num_of_bytes;
5405 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
5406 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
5410 err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
5416 err = create_user_rq(dev, pd, udata, rwq, &ucmd);
5422 rwq->user_index = ucmd.user_index;
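Note: lines 5373-5406 are the striding-RQ path of WQ creation: comp_mask may only carry MLX5_IB_CREATE_WQ_STRIDING_RQ, and the user's log2 stride size and log2 strides-per-WQE are range-checked against device capabilities before being stored in the rwq. A hedged sketch of the range check follows; the MIN_/MAX_ constants are illustrative placeholders, not the values the driver actually reads from the HCA.

#include <errno.h>
#include <stdint.h>

#define SKETCH_MIN_STRIDE_LOG_BYTES   6         /* placeholder: 64 B   */
#define SKETCH_MAX_STRIDE_LOG_BYTES   13        /* placeholder: 8 KB   */
#define SKETCH_MIN_WQE_LOG_STRIDES    9         /* placeholder: 512    */
#define SKETCH_MAX_WQE_LOG_STRIDES    16        /* placeholder: 65536  */

static int sketch_check_striding_rq(uint32_t log_stride_bytes,
                                    uint32_t log_num_strides)
{
        if (log_stride_bytes < SKETCH_MIN_STRIDE_LOG_BYTES ||
            log_stride_bytes > SKETCH_MAX_STRIDE_LOG_BYTES)
                return -EINVAL;

        if (log_num_strides < SKETCH_MIN_WQE_LOG_STRIDES ||
            log_num_strides > SKETCH_MAX_WQE_LOG_STRIDES)
                return -EINVAL;

        return 0;
}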
5588 struct mlx5_ib_modify_wq ucmd = {};
5601 if (udata->inlen > sizeof(ucmd) &&
5602 !ib_is_udata_cleared(udata, sizeof(ucmd),
5603 udata->inlen - sizeof(ucmd)))
5606 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
5609 if (ucmd.comp_mask || ucmd.reserved)