Lines matching refs: ucmd

352 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
370 if (ucmd) {
371 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
372 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
374 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
556 struct mlx5_ib_create_qp *ucmd,
568 if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
570 ucmd->sq_wqe_count);
574 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
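The set_rq_size() and set_user_buf_size() matches above show the two sanity checks applied before a user-supplied queue geometry is trusted: the RQ WQE shift is rejected if it is wider than the field it was read from, and a non-zero SQ WQE count must be a power of two. Below is a minimal userspace sketch of the same checks, assuming 32-bit fields as in struct mlx5_ib_create_qp; the function name and error codes are illustrative only, not the driver's.

#include <errno.h>
#include <stdint.h>

/*
 * Illustrative sketch only, not the driver code: validate a user-supplied
 * RQ WQE shift and SQ WQE count the way the matches at qp.c:372 and
 * qp.c:568 suggest.
 */
static int validate_user_queue_sizes(uint32_t rq_wqe_shift, uint32_t sq_wqe_count)
{
	/* A shift wider than the 32-bit field itself would overflow 1 << shift. */
	if (rq_wqe_shift > 8 * sizeof(rq_wqe_shift))
		return -EINVAL;

	/* A non-zero SQ WQE count must be a power of two (is_power_of_2()). */
	if (sq_wqe_count && (sq_wqe_count & (sq_wqe_count - 1)))
		return -EINVAL;

	return 0;
}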
832 struct mlx5_ib_create_wq *ucmd)
842 if (!ucmd->buf_addr)
845 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
852 mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
854 err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
864 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
867 (unsigned long long)ucmd->buf_addr, rwq->buf_size,
870 err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
895 struct mlx5_ib_create_qp *ucmd)
917 uar_index = ucmd->bfreg_index;
922 ucmd->bfreg_index, true);
947 err = set_user_buf_size(dev, qp, ucmd, base, attr);
951 if (ucmd->buf_addr && ubuffer->buf_size) {
952 ubuffer->buf_addr = ucmd->buf_addr;
988 err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
1558 void *ucmd;
1570 struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
1586 if (ucmd->comp_mask) {
1591 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
1592 !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
1623 if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1628 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
1633 switch (ucmd->rx_hash_function) {
1639 if (len != ucmd->rx_key_len) {
1645 memcpy(rss_key, ucmd->rx_hash_key, len);
1653 if (!ucmd->rx_hash_fields_mask) {
1661 if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1662 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
1663 ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1664 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
1670 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1671 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
1674 else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1675 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1679 outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1680 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1682 ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1683 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1685 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
1694 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1695 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1698 else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1699 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1703 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1704 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
1707 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
1708 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1711 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1712 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
1715 if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
1716 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1719 if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
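The long run of create_rss_raw_qp_tir() matches (qp.c:1653-1719) all interrogate ucmd->rx_hash_fields_mask: an empty mask is allowed, IPv4 and IPv6 selections are mutually exclusive, TCP and UDP port selections are mutually exclusive, and whatever survives is translated into L3/L4 hash-field selections for the TIR. Below is a hedged sketch of the two exclusivity checks, with hypothetical bit definitions standing in for the MLX5_RX_HASH_* flags.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the MLX5_RX_HASH_* bits in the listing. */
#define RX_HASH_SRC_IPV4	(1 << 0)
#define RX_HASH_DST_IPV4	(1 << 1)
#define RX_HASH_SRC_IPV6	(1 << 2)
#define RX_HASH_DST_IPV6	(1 << 3)
#define RX_HASH_SRC_PORT_TCP	(1 << 4)
#define RX_HASH_DST_PORT_TCP	(1 << 5)
#define RX_HASH_SRC_PORT_UDP	(1 << 6)
#define RX_HASH_DST_PORT_UDP	(1 << 7)

/*
 * Illustrative sketch only: reject contradictory RX hash field masks the
 * way the checks around qp.c:1661 and qp.c:1694 do, i.e. a flow cannot
 * hash on both IPv4 and IPv6 headers, nor on both TCP and UDP ports.
 */
static int check_rx_hash_fields(uint64_t mask)
{
	bool ipv4 = mask & (RX_HASH_SRC_IPV4 | RX_HASH_DST_IPV4);
	bool ipv6 = mask & (RX_HASH_SRC_IPV6 | RX_HASH_DST_IPV6);
	bool tcp  = mask & (RX_HASH_SRC_PORT_TCP | RX_HASH_DST_PORT_TCP);
	bool udp  = mask & (RX_HASH_SRC_PORT_UDP | RX_HASH_DST_PORT_UDP);

	if (ipv4 && ipv6)
		return -EINVAL;
	if (tcp && udp)
		return -EINVAL;
	return 0;
}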
1922 struct mlx5_ib_create_qp *ucmd = params->ucmd;
1957 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
1963 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
1964 ucmd->rq_wqe_count != qp->rq.wqe_cnt)
1967 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
1971 &inlen, base, ucmd);
1979 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
2069 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
2407 struct mlx5_ib_create_qp *ucmd = params->ucmd;
2423 MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
2426 MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
2547 void *ucmd, struct ib_qp_init_attr *attr)
2554 flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
2556 flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
2716 size_t ucmd = sizeof(struct mlx5_ib_create_qp);
2722 params->ucmd_size = ucmd;
2732 params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
2743 ucmd = sizeof(struct mlx5_ib_create_qp_rss);
2744 params->ucmd_size = ucmd;
2745 if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
2748 params->inlen = min(ucmd, inlen);
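The process_udata_size() matches (qp.c:2716-2748) show how the driver reconciles the length userspace passed in with the struct size this kernel understands: a longer buffer from a newer userspace is accepted only if every byte past the known struct is zero (ib_is_udata_cleared()), and the copy length is then clamped to the smaller of the two. Below is a minimal userspace sketch of that forward-compatibility rule; the helper is a hypothetical stand-in for ib_is_udata_cleared(), not its real implementation.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

/* Hypothetical stand-in for ib_is_udata_cleared(): true if every byte
 * in [buf + offset, buf + offset + len) is zero. */
static bool tail_is_cleared(const unsigned char *buf, size_t offset, size_t len)
{
	while (len--)
		if (buf[offset++])
			return false;
	return true;
}

/*
 * Illustrative sketch only: accept a user command buffer that is the same
 * size as, or larger than, the struct this kernel knows about, as the
 * qp.c:2743-2748 matches do, and return how many bytes may be copied.
 */
static ssize_t usable_ucmd_len(const unsigned char *udata, size_t inlen, size_t known_size)
{
	if (inlen > known_size &&
	    !tail_is_cleared(udata, known_size, inlen - known_size))
		return -EINVAL;	/* unknown trailing fields are non-zero: reject */

	return inlen < known_size ? inlen : known_size;	/* min(inlen, known_size) */
}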
2839 struct mlx5_ib_create_qp *ucmd = params->ucmd;
2847 return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
2898 "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
2937 params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
2938 if (!params.ucmd)
2941 err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
2955 err = process_vendor_flags(dev, qp, params.ucmd, attr);
2975 kfree(params.ucmd);
2976 params.ucmd = NULL;
3013 kfree(params.ucmd);
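The mlx5_ib_create_qp() matches (qp.c:2937-3013) trace the lifetime of the command buffer itself: it is allocated zeroed at the size process_udata_size() settled on, filled from udata for params.inlen bytes, consumed by process_vendor_flags() and the creation path, and freed before the function returns on every path. Below is a minimal userspace sketch of that allocate/copy/free shape; copy_in() is a hypothetical stand-in for ib_copy_from_udata().

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct params {
	void	*ucmd;
	size_t	 ucmd_size;
	size_t	 inlen;
};

/* Hypothetical stand-in for ib_copy_from_udata(). */
static int copy_in(void *dst, const void *udata, size_t len)
{
	memcpy(dst, udata, len);
	return 0;
}

/*
 * Illustrative sketch only: zero-allocate the command at the size the
 * kernel expects, copy only the bytes userspace provided, and free the
 * buffer on every exit path, as the qp.c:2937-3013 matches do.
 */
static int create_qp_sketch(struct params *p, const void *udata)
{
	int err;

	p->ucmd = calloc(1, p->ucmd_size);
	if (!p->ucmd)
		return -ENOMEM;

	err = copy_in(p->ucmd, udata, p->inlen);
	/* ... process vendor flags, create the QP ... */

	free(p->ucmd);
	p->ucmd = NULL;
	return err;
}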
3775 const struct mlx5_ib_modify_qp *ucmd,
4020 if (ucmd->burst_info.max_burst_sz) {
4024 ucmd->burst_info.max_burst_sz;
4031 if (ucmd->burst_info.typical_pkt_sz) {
4035 ucmd->burst_info.typical_pkt_sz;
4051 ucmd->ece_options : 0;
4160 int attr_mask, struct mlx5_ib_modify_qp *ucmd,
4177 if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
4184 MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
4328 struct mlx5_ib_modify_qp ucmd = {};
4341 if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
4344 if (udata->inlen > sizeof(ucmd) &&
4345 !ib_is_udata_cleared(udata, sizeof(ucmd),
4346 udata->inlen - sizeof(ucmd)))
4349 if (ib_copy_from_udata(&ucmd, udata,
4350 min(udata->inlen, sizeof(ucmd))))
4353 if (ucmd.comp_mask ||
4354 memchr_inv(&ucmd.burst_info.reserved, 0,
4355 sizeof(ucmd.burst_info.reserved)))
4367 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4425 new_state, &ucmd, &resp, udata);
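The mlx5_ib_modify_qp() matches (qp.c:4328-4367) apply the complementary rules on the modify path: the buffer must be at least large enough to reach the end of ece_options (offsetofend()), only min(inlen, sizeof(ucmd)) bytes are copied, and anything the kernel does not implement, comp_mask bits or reserved padding, must be zero. Below is a hedged sketch with a hypothetical, cut-down command struct; the trailing-bytes check from the previous sketch would sit in front of this and is omitted here.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical, cut-down stand-in for struct mlx5_ib_modify_qp. */
struct modify_cmd {
	uint32_t comp_mask;
	uint32_t ece_options;
	uint8_t  reserved[8];
};

/* First byte past a member, like the kernel's offsetofend() used at qp.c:4341. */
#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

/*
 * Illustrative sketch only: require the buffer to cover everything up to
 * ece_options, copy at most sizeof(*cmd) bytes, and reject extension and
 * reserved fields this code does not implement, mirroring qp.c:4341-4355.
 */
static int parse_modify_cmd(const void *udata, size_t inlen, struct modify_cmd *cmd)
{
	size_t i;

	if (inlen < offsetofend(struct modify_cmd, ece_options))
		return -EINVAL;		/* too short to carry the required fields */

	memset(cmd, 0, sizeof(*cmd));
	memcpy(cmd, udata, inlen < sizeof(*cmd) ? inlen : sizeof(*cmd));

	if (cmd->comp_mask)
		return -EOPNOTSUPP;	/* unknown extension bits */

	for (i = 0; i < sizeof(cmd->reserved); i++)
		if (cmd->reserved[i])
			return -EOPNOTSUPP;	/* reserved bytes must stay zero */

	return 0;
}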
4978 struct mlx5_ib_create_wq *ucmd,
4985 if (!ucmd->rq_wqe_count)
4988 rwq->wqe_count = ucmd->rq_wqe_count;
4989 rwq->wqe_shift = ucmd->rq_wqe_shift;
5017 struct mlx5_ib_create_wq ucmd = {};
5028 if (udata->inlen > sizeof(ucmd) &&
5029 !ib_is_udata_cleared(udata, sizeof(ucmd),
5030 udata->inlen - sizeof(ucmd))) {
5035 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
5040 if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
5043 } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
5048 if ((ucmd.single_stride_log_num_of_bytes <
5050 (ucmd.single_stride_log_num_of_bytes >
5053 ucmd.single_stride_log_num_of_bytes,
5059 ucmd.single_wqe_log_num_of_strides)) {
5063 ucmd.single_wqe_log_num_of_strides,
5071 ucmd.single_stride_log_num_of_bytes;
5072 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
5073 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
5077 err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
5083 err = create_user_rq(dev, pd, udata, rwq, &ucmd);
5089 rwq->user_index = ucmd.user_index;
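The mlx5_ib_create_wq() matches (qp.c:5040-5073) validate a striding-RQ request field by field: comp_mask may carry only the striding-RQ bit, and the user's single_stride_log_num_of_bytes and single_wqe_log_num_of_strides must each fall inside a supported range before being copied into the rwq. Below is a hedged sketch of the range checks; the bounds are illustrative placeholders for the MLX5_{MIN,MAX}_SINGLE_* limits and device capabilities the driver actually consults.

#include <errno.h>
#include <stdint.h>

/* Illustrative bounds; the driver takes the real ones from device caps
 * and the MLX5_{MIN,MAX}_SINGLE_*_LOG_* constants. */
#define MIN_STRIDE_LOG_BYTES	6	/* 64-byte strides */
#define MAX_STRIDE_LOG_BYTES	13	/* 8 KB strides */
#define MIN_WQE_LOG_STRIDES	9
#define MAX_WQE_LOG_STRIDES	16

/*
 * Illustrative sketch only: mirror the striding-RQ range checks seen
 * around qp.c:5048-5063 before the user's values are trusted.
 */
static int check_striding_rq(uint32_t log_stride_bytes, uint32_t log_num_strides)
{
	if (log_stride_bytes < MIN_STRIDE_LOG_BYTES ||
	    log_stride_bytes > MAX_STRIDE_LOG_BYTES)
		return -EINVAL;

	if (log_num_strides < MIN_WQE_LOG_STRIDES ||
	    log_num_strides > MAX_WQE_LOG_STRIDES)
		return -EINVAL;

	return 0;
}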
5255 struct mlx5_ib_modify_wq ucmd = {};
5268 if (udata->inlen > sizeof(ucmd) &&
5269 !ib_is_udata_cleared(udata, sizeof(ucmd),
5270 udata->inlen - sizeof(ucmd)))
5273 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
5276 if (ucmd.comp_mask || ucmd.reserved)