Lines matching defs:mvq (source lines referencing the struct mlx5_vdpa_virtqueue instance, mvq, in the mlx5 vDPA net driver, mlx5_vnet.c)

354 		       struct mlx5_vdpa_virtqueue *mvq, u32 num_ent)
360 vqp = fw ? &mvq->fwqp : &mvq->vqqp;
380 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn);
399 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
410 vqp = &mvq->vqqp;
411 err = rq_buf_alloc(ndev, vqp, mvq->num_ent);
427 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent);
445 rx_post(vqp, mvq->num_ent);
491 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
493 struct mlx5_vdpa_net *ndev = mvq->ndev;
496 event_cb = &ndev->event_cbs[mvq->index];
497 mlx5_cq_set_ci(&mvq->cq.mcq);
503 rx_post(&mvq->vqqp, num);
510 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
511 struct mlx5_vdpa_net *ndev = mvq->ndev;
515 while (!mlx5_vdpa_poll_one(&mvq->cq)) {
517 if (num > mvq->num_ent / 2) {
524 mlx5_vdpa_handle_completions(mvq, num);
530 mlx5_vdpa_handle_completions(mvq, num);
532 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
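
The completion path above (lines 491-532) batches its work: mlx5_vdpa_cq_comp polls CQEs off the virtqueue's CQ and, whenever more than half the ring (num_ent / 2) has been drained in one pass, flushes the batch through mlx5_vdpa_handle_completions, which updates the CQ consumer index, fires the registered event callback, and reposts receive buffers; polling then continues, and the CQ is rearmed only once the loop runs dry. A minimal userspace model of that batching loop, with stand-in poll/handle routines in place of the driver functions listed above:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_ENT 256

    static int pending = 700;               /* pretend CQEs waiting on the CQ */

    /* Stand-in for mlx5_vdpa_poll_one(); the real one returns 0 when a CQE
     * was consumed, which is why the driver loops on !mlx5_vdpa_poll_one(). */
    static bool poll_one(void)
    {
        return pending-- > 0;
    }

    static void handle_completions(int num)  /* stand-in for the real handler */
    {
        printf("handled batch of %d completions\n", num);
    }

    int main(void)
    {
        int num = 0;

        /* Models lines 515-530: flush a partial batch every num_ent / 2
         * CQEs so the doorbell record and the guest see progress even
         * while completions keep arriving; polling continues afterwards. */
        while (poll_one()) {
            if (++num > NUM_ENT / 2) {
                handle_completions(num);
                num = 0;
            }
        }
        if (num)
            handle_completions(num);
        /* ...the driver then rearms the CQ (mlx5_cq_arm, line 532). */
        return 0;
    }
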
537 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
541 struct mlx5_vdpa_cq *vcq = &mvq->cq;
599 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);
614 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx];
616 struct mlx5_vdpa_cq *vcq = &mvq->cq;
666 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
676 *umemp = &mvq->umem1;
681 *umemp = &mvq->umem2;
686 *umemp = &mvq->umem3;
690 (*umemp)->size = p_a * mvq->num_ent + p_b;
698 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
708 set_umem_size(ndev, mvq, num, &umem);
748 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num)
756 umem = &mvq->umem1;
759 umem = &mvq->umem2;
762 umem = &mvq->umem3;
774 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
780 err = create_umem(ndev, mvq, num);
788 umem_destroy(ndev, mvq, num);
793 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
798 umem_destroy(ndev, mvq, num);
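
The umem block above (lines 666-798) sizes and manages the three firmware umems every virtqueue owns. set_umem_size computes each umem's size as a linear function of queue depth, size = p_a * num_ent + p_b (line 690), where the (p_a, p_b) pair for umem 1, 2 or 3 comes from device capability queries; umems_create then builds all three in order and, on failure, destroys the already-built ones in reverse. A compilable sketch of both pieces, with made-up (p_a, p_b) values standing in for the capability reads:

    #include <stdio.h>
    #include <stdlib.h>

    struct umem { size_t size; void *buf; };

    /* Invented per-umem (a, b) pairs; the driver reads these from device caps. */
    static const struct { size_t a, b; } prm[3] = {
        { 256, 4096 }, { 128, 4096 }, { 32, 4096 }
    };

    /* Models line 690: size = p_a * num_ent + p_b for umem 'num' (1..3). */
    static size_t umem_size(int num, unsigned int num_ent)
    {
        return prm[num - 1].a * num_ent + prm[num - 1].b;
    }

    int main(void)
    {
        struct umem umems[3] = { { 0 } };
        unsigned int num_ent = 256;
        int num;

        /* Models umems_create(): allocate 1..3, unwind built ones on error. */
        for (num = 1; num <= 3; num++) {
            umems[num - 1].size = umem_size(num, num_ent);
            umems[num - 1].buf = malloc(umems[num - 1].size);
            if (!umems[num - 1].buf)
                goto err;
            printf("umem%d: %zu bytes\n", num, umems[num - 1].size);
        }
        for (num = 1; num <= 3; num++)
            free(umems[num - 1].buf);
        return 0;

    err:
        for (num--; num >= 1; num--)
            free(umems[num - 1].buf);
        return 1;
    }
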
858 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
869 err = umems_create(ndev, mvq);
887 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
888 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
896 if (vq_is_tx(mvq->index))
899 if (mvq->map.virq) {
901 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index);
904 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
907 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index);
908 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
911 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
912 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
913 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
915 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
916 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
917 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
918 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
919 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
920 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
923 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id);
929 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT;
931 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
938 umems_destroy(ndev, mvq);
942 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
949 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
954 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
957 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
958 umems_destroy(ndev, mvq);
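
create_virtqueue (lines 858-938) programs the firmware virtqueue object: the ring addresses handed in through set_vq_address land in the context as desc_addr, used_addr (from device_addr) and available_addr (from driver_addr) at lines 911-913, the three umems and the counter set are attached, the firmware-assigned object id is kept in virtq_id, and fw_state starts at INIT; destroy_virtqueue reverses this and drops the state back to NONE. Lines 899-904 pick the completion event source: a per-queue MSI-X vector when one was allocated, otherwise the firmware event QP. A tiny model of that choice, with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the driver's structures. */
    struct vq_irq_map { unsigned int virq, index; };  /* MSI-X mapping, if any */
    struct vq {
        struct vq_irq_map map;
        unsigned int fwqp_qpn;                        /* event QP number */
    };

    /* Models lines 899-904: completions are signalled through the VQ's MSI-X
     * vector when one was allocated, otherwise through the firmware event QP. */
    static unsigned int event_qpn_or_msix(const struct vq *mvq, bool *msix_mode)
    {
        *msix_mode = mvq->map.virq != 0;
        return *msix_mode ? mvq->map.index : mvq->fwqp_qpn;
    }

    int main(void)
    {
        struct vq with_irq = { .map = { .virq = 37, .index = 2 }, .fwqp_qpn = 99 };
        struct vq no_irq   = { .fwqp_qpn = 99 };
        bool msix;

        printf("with vector -> %u (msix=%d)\n",
               event_qpn_or_msix(&with_irq, &msix), msix);
        printf("without     -> %u (msix=%d)\n",
               event_qpn_or_msix(&no_irq, &msix), msix);
        return 0;
    }
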
961 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
963 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
966 static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
968 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn;
1066 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd)
1074 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw));
1083 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1087 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP);
1091 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP);
1095 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP);
1099 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP);
1103 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP);
1107 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP);
1111 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
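
connect_qps (lines 1083-1111) walks the virtqueue's QP pair through the standard mlx5 state ladder, RST, INIT, RTR, RTS, alternating between the firmware QP (fw = true) and the driver QP; get_qpn and get_rqpn (lines 961-968) hand each modify command the QP being moved plus its peer ("remote") QP, which is simply the other member of the pair, and only the fw side takes the final RTR2RTS step (line 1111). A compilable model of the ladder, with stand-in opcodes:

    #include <stdbool.h>
    #include <stdio.h>

    enum { OP_2RST, OP_RST2INIT, OP_INIT2RTR, OP_RTR2RTS };  /* stand-in opcodes */

    struct vq { unsigned int fw_qpn, vq_qpn; };

    /* Models lines 961-968: the QP being modified vs. its peer. */
    static unsigned int get_qpn(const struct vq *q, bool fw)
    {
        return fw ? q->fw_qpn : q->vq_qpn;
    }

    static unsigned int get_rqpn(const struct vq *q, bool fw)
    {
        return fw ? q->vq_qpn : q->fw_qpn;
    }

    static int modify_qp(const struct vq *q, bool fw, int cmd)
    {
        printf("cmd %d on qpn %u (peer %u)\n", cmd, get_qpn(q, fw), get_rqpn(q, fw));
        return 0;                                    /* pretend success */
    }

    /* Models lines 1083-1111: reset both sides, then walk them up the ladder. */
    static int connect_qps(const struct vq *q)
    {
        int err;

        if ((err = modify_qp(q, true,  OP_2RST)))     return err;
        if ((err = modify_qp(q, false, OP_2RST)))     return err;
        if ((err = modify_qp(q, true,  OP_RST2INIT))) return err;
        if ((err = modify_qp(q, false, OP_RST2INIT))) return err;
        if ((err = modify_qp(q, true,  OP_INIT2RTR))) return err;
        if ((err = modify_qp(q, false, OP_INIT2RTR))) return err;
        return modify_qp(q, true, OP_RTR2RTS);
    }

    int main(void)
    {
        struct vq q = { .fw_qpn = 10, .vq_qpn = 20 };
        return connect_qps(&q);
    }
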
1120 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
1138 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
1171 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
1180 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
1183 if (!is_valid_state_change(mvq->fw_state, state))
1194 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id);
1204 mvq->fw_state = state;
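
modify_virtqueue (lines 1171-1204) refuses to touch an object that was never created (fw_state NONE) and gates every transition through is_valid_state_change before caching the new state. The validator's body is not part of this listing, so the sketch below is a reconstruction from the transitions the listed code actually performs: create puts the object in INIT, set_vq_ready moves it to RDY, and suspend_vq moves RDY to SUSPEND; some device/firmware versions may additionally allow SUSPEND back to RDY (resume), which the set_vq_ready path at line 2204 hints at:

    #include <stdbool.h>
    #include <stdio.h>

    enum q_state { Q_NONE, Q_INIT, Q_RDY, Q_SUSPEND };

    /* Hypothetical reconstruction of is_valid_state_change(): only the
     * forward transitions exercised in the listing are allowed here;
     * resume-capable devices would also accept Q_SUSPEND -> Q_RDY. */
    static bool is_valid_state_change(enum q_state old, enum q_state new_state)
    {
        switch (old) {
        case Q_INIT:
            return new_state == Q_RDY;
        case Q_RDY:
            return new_state == Q_SUSPEND;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("INIT->RDY:  %d\n", is_valid_state_change(Q_INIT, Q_RDY));
        printf("RDY->INIT:  %d\n", is_valid_state_change(Q_RDY, Q_INIT));
        return 0;
    }
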
1209 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1229 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
1234 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1243 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id);
1247 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
1261 struct mlx5_vdpa_virtqueue *mvq)
1272 dev_name(&ndev->mvdev.vdev.dev), mvq->index);
1273 ent->dev_id = &ndev->event_cbs[mvq->index];
1280 mvq->map = ent->map;
1287 struct mlx5_vdpa_virtqueue *mvq)
1293 if (mvq->map.virq == irqp->entries[i].map.virq) {
1294 free_irq(mvq->map.virq, irqp->entries[i].dev_id);
1300 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1302 u16 idx = mvq->index;
1305 if (!mvq->num_ent)
1308 if (mvq->initialized)
1311 err = cq_create(ndev, idx, mvq->num_ent);
1315 err = qp_create(ndev, mvq, &mvq->fwqp);
1319 err = qp_create(ndev, mvq, &mvq->vqqp);
1323 err = connect_qps(ndev, mvq);
1327 err = counter_set_alloc(ndev, mvq);
1331 alloc_vector(ndev, mvq);
1332 err = create_virtqueue(ndev, mvq);
1336 if (mvq->ready) {
1337 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
1345 mvq->initialized = true;
1349 destroy_virtqueue(ndev, mvq);
1351 dealloc_vector(ndev, mvq);
1352 counter_set_dealloc(ndev, mvq);
1354 qp_destroy(ndev, &mvq->vqqp);
1356 qp_destroy(ndev, &mvq->fwqp);
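
setup_vq (lines 1300-1356) fixes the construction order for one virtqueue: CQ, firmware QP, driver QP, QP connection, counter set, MSI-X vector, then the firmware virtqueue object, moving it to RDY if the vDPA core already marked the queue ready; the error lines at 1349-1356 unwind in exactly the reverse order via chained goto labels. A minimal compilable illustration of that idiom, with hypothetical build/undo steps in place of the driver calls:

    #include <stdio.h>

    static int build(const char *what, int fail_here)
    {
        if (fail_here) {
            printf("failed building %s\n", what);
            return -1;
        }
        printf("built %s\n", what);
        return 0;
    }

    static void undo(const char *what)
    {
        printf("undoing %s\n", what);
    }

    /* Same shape as setup_vq(): each goto label tears down everything built
     * before the failing step, in reverse order of construction. */
    static int setup_like(int fail_step)
    {
        if (build("cq", fail_step == 1))
            return -1;
        if (build("fwqp", fail_step == 2))
            goto err_fwqp;
        if (build("vqqp", fail_step == 3))
            goto err_vqqp;
        if (build("virtqueue", fail_step == 4))
            goto err_vq;
        return 0;

    err_vq:
        undo("vqqp");
    err_vqqp:
        undo("fwqp");
    err_fwqp:
        undo("cq");
        return -1;
    }

    int main(void)
    {
        setup_like(3);      /* fail at the vqqp step to exercise the unwind */
        return 0;
    }
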
1362 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1366 if (!mvq->initialized)
1369 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
1372 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
1375 if (query_virtqueue(ndev, mvq, &attr)) {
1379 mvq->avail_idx = attr.available_index;
1380 mvq->used_idx = attr.used_index;
1391 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1393 if (!mvq->initialized)
1396 suspend_vq(ndev, mvq);
1397 destroy_virtqueue(ndev, mvq);
1398 dealloc_vector(ndev, mvq);
1399 counter_set_dealloc(ndev, mvq);
1400 qp_destroy(ndev, &mvq->vqqp);
1401 qp_destroy(ndev, &mvq->fwqp);
1402 cq_destroy(ndev, mvq->index);
1403 mvq->initialized = false;
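
suspend_vq and teardown_vq (lines 1362-1403) are the mirror image of setup_vq: teardown first suspends the queue so that query_virtqueue can snapshot the hardware's available and used indices into mvq->avail_idx and mvq->used_idx, and only then releases the object, vector, counters, QPs and CQ in reverse construction order. A small model of the suspend-then-snapshot step, with a stand-in for the firmware query:

    #include <stdio.h>

    struct vq_attr { unsigned short available_index, used_index; };
    struct vq { unsigned short avail_idx, used_idx; int suspended; };

    static int query_hw(const struct vq *q, struct vq_attr *attr)
    {
        (void)q;                       /* a real query reads this queue's hw state */
        attr->available_index = 5;     /* invented values; query_virtqueue() fills these */
        attr->used_index = 3;
        return 0;
    }

    /* Models lines 1362-1380: suspend first, then snapshot the ring indices
     * so they survive the object's destruction. */
    static void suspend_vq_like(struct vq *q)
    {
        struct vq_attr attr;

        q->suspended = 1;              /* models modify_virtqueue(..., SUSPEND) */
        if (query_hw(q, &attr))
            return;
        q->avail_idx = attr.available_index;
        q->used_idx = attr.used_index;
    }

    int main(void)
    {
        struct vq q = { 0 };

        suspend_vq_like(&q);
        printf("snapshot: avail %u used %u\n", q.avail_idx, q.used_idx);
        return 0;
    }
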
2087 struct mlx5_vdpa_virtqueue *mvq;
2100 mvq = &ndev->vqs[idx];
2101 if (unlikely(!mvq->ready))
2112 struct mlx5_vdpa_virtqueue *mvq;
2124 mvq = &ndev->vqs[idx];
2125 mvq->desc_addr = desc_area;
2126 mvq->device_addr = device_area;
2127 mvq->driver_addr = driver_area;
2135 struct mlx5_vdpa_virtqueue *mvq;
2147 mvq = &ndev->vqs[idx];
2148 mvq->num_ent = num;
2186 struct mlx5_vdpa_virtqueue *mvq;
2200 mvq = &ndev->vqs[idx];
2202 suspend_vq(ndev, mvq);
2204 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
2212 mvq->ready = ready;
2234 struct mlx5_vdpa_virtqueue *mvq;
2244 mvq = &ndev->vqs[idx];
2245 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) {
2250 mvq->used_idx = state->split.avail_index;
2251 mvq->avail_idx = state->split.avail_index;
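
The set_vq_state handler (lines 2234-2251) rejects the change while the object is RDY (the branch body is elided in this listing, but hardware owns the indices in that state) and, for a split ring, seeds both used_idx and avail_idx from the single avail_index carried in the vDPA state, matching the split-ring convention that a freshly restored queue has no descriptors in flight. A tiny model, with simplified stand-in types:

    #include <stdio.h>

    struct vdpa_vq_state_split { unsigned short avail_index; };
    struct vq { unsigned short avail_idx, used_idx; int hw_ready; };

    /* Models lines 2244-2251: refuse while the object is RDY, otherwise seed
     * both indices from the one saved value (split rings only). */
    static int set_vq_state_like(struct vq *q, const struct vdpa_vq_state_split *s)
    {
        if (q->hw_ready)
            return -1;                 /* the driver warns and errors out here */
        q->used_idx = s->avail_index;
        q->avail_idx = s->avail_index;
        return 0;
    }

    int main(void)
    {
        struct vq q = { .hw_ready = 0 };
        struct vdpa_vq_state_split s = { .avail_index = 42 };

        if (!set_vq_state_like(&q, &s))
            printf("restored: avail %u used %u\n", q.avail_idx, q.used_idx);
        return 0;
    }
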
2259 struct mlx5_vdpa_virtqueue *mvq;
2271 mvq = &ndev->vqs[idx];
2276 if (!mvq->initialized) {
2281 state->split.avail_index = mvq->used_idx;
2285 err = query_virtqueue(ndev, mvq, &attr);
2410 struct mlx5_vdpa_virtqueue *mvq;
2414 mvq = &ndev->vqs[i];
2415 if (!mvq->initialized)
2418 teardown_vq(ndev, mvq);
2612 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
2614 struct mlx5_vq_restore_info *ri = &mvq->ri;
2618 if (mvq->initialized) {
2619 err = query_virtqueue(ndev, mvq, &attr);
2626 ri->ready = mvq->ready;
2627 ri->num_ent = mvq->num_ent;
2628 ri->desc_addr = mvq->desc_addr;
2629 ri->device_addr = mvq->device_addr;
2630 ri->driver_addr = mvq->driver_addr;
2631 ri->map = mvq->map;
2657 struct mlx5_vdpa_virtqueue *mvq;
2664 mvq = &ndev->vqs[i];
2665 ri = &mvq->ri;
2669 mvq->avail_idx = ri->avail_index;
2670 mvq->used_idx = ri->used_index;
2671 mvq->ready = ri->ready;
2672 mvq->num_ent = ri->num_ent;
2673 mvq->desc_addr = ri->desc_addr;
2674 mvq->device_addr = ri->device_addr;
2675 mvq->driver_addr = ri->driver_addr;
2676 mvq->map = ri->map;
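
save_channel_info (lines 2612-2631) and the restore loop (lines 2657-2676) implement the driver's tear-down-and-rebuild path, used around events that force the queues to be re-created (a memory-map change, for instance): each initialized queue's hardware indices are queried and stashed, together with ready, num_ent, the three ring addresses and the irq map, in the per-queue mlx5_vq_restore_info, then copied back once the queues exist again. A minimal round-trip model with a reduced field set:

    #include <stdio.h>

    struct restore_info { unsigned short avail, used; unsigned int num_ent; int ready; };
    struct vq {
        unsigned short avail_idx, used_idx;
        unsigned int num_ent;
        int ready;
        struct restore_info ri;
    };

    static void save(struct vq *q)      /* models save_channel_info() */
    {
        q->ri = (struct restore_info){ q->avail_idx, q->used_idx, q->num_ent, q->ready };
    }

    static void restore(struct vq *q)   /* models the loop at lines 2657-2676 */
    {
        q->avail_idx = q->ri.avail;
        q->used_idx = q->ri.used;
        q->num_ent = q->ri.num_ent;
        q->ready = q->ri.ready;
    }

    int main(void)
    {
        struct vq q = { .avail_idx = 7, .used_idx = 7, .num_ent = 256, .ready = 1 };

        save(&q);
        q.avail_idx = q.used_idx = 0;   /* queue torn down and rebuilt from scratch */
        q.num_ent = 0;
        q.ready = 0;
        restore(&q);
        printf("avail %u used %u num %u ready %d\n",
               q.avail_idx, q.used_idx, q.num_ent, q.ready);
        return 0;
    }
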
3037 struct mlx5_vdpa_virtqueue *mvq;
3045 mvq = &ndev->vqs[idx];
3046 if (!mvq->map.virq)
3049 return mvq->map.virq;
3059 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
3071 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
3079 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id);
3097 struct mlx5_vdpa_virtqueue *mvq;
3117 mvq = &ndev->vqs[idx];
3118 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc);
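
counter_set_query (lines 3059-3079) reads the per-queue virtio counter object, yielding received_desc and completed_desc, and is only legal while the queue object is RDY (line 3071); the per-queue stats path at lines 3097-3118 uses it to report those two counters. A small guarded-query model with invented counter values:

    #include <stdio.h>

    struct vq { int hw_ready; };

    /* Stand-in query; the driver reads these from the firmware counter object
     * identified by mvq->counter_set_id. */
    static int counter_query_like(const struct vq *q,
                                  unsigned long long *received_desc,
                                  unsigned long long *completed_desc)
    {
        if (!q->hw_ready)
            return -1;                 /* models the RDY check at line 3071 */
        *received_desc = 1000;         /* invented: descriptors taken from the ring */
        *completed_desc = 998;         /* invented: descriptors completed back */
        return 0;
    }

    int main(void)
    {
        struct vq q = { .hw_ready = 1 };
        unsigned long long rx, done;

        if (!counter_query_like(&q, &rx, &done))
            printf("received %llu completed %llu (in flight %llu)\n",
                   rx, done, rx - done);
        return 0;
    }
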
3161 struct mlx5_vdpa_virtqueue *mvq;
3169 mvq = &ndev->vqs[i];
3170 suspend_vq(ndev, mvq);
3279 struct mlx5_vdpa_virtqueue *mvq;
3283 mvq = &ndev->vqs[i];
3284 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
3285 mvq->index = i;
3286 mvq->ndev = ndev;
3287 mvq->fwqp.fw = true;
3288 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
3291 mvq = &ndev->vqs[i];
3292 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));
3293 mvq->index = i;
3294 mvq->ndev = ndev;
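
The init loops above (lines 3279-3294) use a deliberate trick: memset clears only the first offsetof(struct mlx5_vdpa_virtqueue, ri) bytes, wiping the live queue state while leaving the trailing restore_info intact so a later restore still finds the saved ring state; index, the ndev back-pointer and (for the active queues) fwqp.fw and fw_state are then re-seeded by hand. A self-contained demonstration of the partial-clear idiom:

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct restore_info { unsigned short avail, used; };

    struct vq {
        unsigned int index;
        unsigned short avail_idx, used_idx;
        struct restore_info ri;   /* must stay last: everything before it is cleared */
    };

    int main(void)
    {
        struct vq q = { .index = 3, .avail_idx = 9, .used_idx = 9,
                        .ri = { .avail = 9, .used = 9 } };

        /* Models lines 3284-3285: zero the live state, preserve ri, re-seed index. */
        memset(&q, 0, offsetof(struct vq, ri));
        q.index = 3;

        printf("live avail %u, saved avail %u\n", q.avail_idx, q.ri.avail);
        return 0;
    }
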