Lines Matching defs:mvdev

128 static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
130 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
131 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
137 return idx <= mvdev->max_idx;
142 static int setup_driver(struct mlx5_vdpa_dev *mvdev);
150 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
156 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
160 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
163 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
166 static u16 mlx5vdpa16_to_cpu(struct mlx5_vdpa_dev *mvdev, __virtio16 val)
168 return __virtio16_to_cpu(mlx5_vdpa_is_little_endian(mvdev), val);
171 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
173 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
176 static u16 ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev)
178 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
181 return mvdev->max_vqs;
184 static bool is_ctrl_vq_idx(struct mlx5_vdpa_dev *mvdev, u16 idx)
186 return idx == ctrl_vq_idx(mvdev);
189 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
192 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
198 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get");
200 mlx5_vdpa_info(mvdev, "driver resets the device\n");
212 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
215 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
221 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
223 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
263 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
270 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
272 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
279 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
292 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
293 ndev->mvdev.mdev->priv.numa_node);
309 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
310 ndev->mvdev.mdev->priv.numa_node);
315 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
361 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
375 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
377 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
389 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
391 ndev->mvdev.mdev->priv.numa_node);
396 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
402 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
415 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
431 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
441 vqp->mqp.uid = ndev->mvdev.res.uid;
451 mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
465 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
466 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
467 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
469 mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
512 void __iomem *uar_page = ndev->mvdev.res.uar->map;
538 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
539 void __iomem *uar_page = ndev->mvdev.res.uar->map;
571 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
587 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
608 mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
615 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
619 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
623 mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
630 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
645 mlx5_vdpa_warn(&ndev->mvdev,
695 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
722 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
730 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
732 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
768 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
805 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
845 static bool counters_supported(const struct mlx5_vdpa_dev *mvdev)
847 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) &
851 static bool msix_mode_supported(struct mlx5_vdpa_dev *mvdev)
853 return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) &
855 pci_msix_can_alloc_dyn(mvdev->mdev->pdev);
879 mlx_features = get_features(ndev->mvdev.actual_features);
884 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
910 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
914 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
921 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
922 if (counters_supported(&ndev->mvdev))
925 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
950 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
953 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
954 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
987 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
999 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
1016 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
1034 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
1078 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
1139 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1140 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
1195 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1201 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
1216 if (!counters_supported(&ndev->mvdev))
1223 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1225 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
1239 if (!counters_supported(&ndev->mvdev))
1244 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid);
1246 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
1247 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id);
1272 dev_name(&ndev->mvdev.vdev.dev), mvq->index);
1339 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
1373 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
1376 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
1387 for (i = 0; i < ndev->mvdev.max_vqs; i++)
1422 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
1432 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
1457 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid);
1467 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
1477 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
1500 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
1517 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
1529 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
1549 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false);
1553 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false);
1564 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter);
1575 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter);
1576 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter);
1605 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
1764 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
1766 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
1772 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
1796 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
1798 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1799 struct mlx5_control_vq *cvq = &mvdev->cvq;
1805 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
1822 mlx5_vdpa_warn(mvdev, "failed to delete old MAC %pM from MPFS table\n",
1829 mlx5_vdpa_warn(mvdev, "failed to insert new MAC %pM into MPFS table\n",
1846 mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
1850 mlx5_vdpa_warn(mvdev, "restore mac failed: Original MAC is zero\n");
1858 mlx5_vdpa_warn(mvdev, "restore mac failed: delete MAC %pM from MPFS table failed\n",
1863 mlx5_vdpa_warn(mvdev, "restore mac failed: insert old MAC %pM into MPFS table failed\n",
1870 mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
1885 static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
1887 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1923 static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
1925 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1927 struct mlx5_control_vq *cvq = &mvdev->cvq;
1943 if (!MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ))
1950 newqps = mlx5vdpa16_to_cpu(mvdev, mq.virtqueue_pairs);
1960 if (!change_num_qps(mvdev, newqps))
1971 static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
1973 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1975 struct mlx5_control_vq *cvq = &mvdev->cvq;
1980 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
1989 id = mlx5vdpa16_to_cpu(mvdev, vlan);
2000 id = mlx5vdpa16_to_cpu(mvdev, vlan);
2016 struct mlx5_vdpa_dev *mvdev;
2023 mvdev = wqent->mvdev;
2024 ndev = to_mlx5_vdpa_ndev(mvdev);
2025 cvq = &mvdev->cvq;
2029 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2032 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
2051 status = handle_ctrl_mac(mvdev, ctrl.cmd);
2054 status = handle_ctrl_mq(mvdev, ctrl.cmd);
2057 status = handle_ctrl_vlan(mvdev, ctrl.cmd);
2075 queue_work(mvdev->wq, &wqent->work);
2085 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2086 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2089 if (!is_index_valid(mvdev, idx))
2092 if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
2093 if (!mvdev->wq || !mvdev->cvq.ready)
2096 queue_work(mvdev->wq, &ndev->cvq_ent.work);
2104 iowrite16(idx, ndev->mvdev.res.kick_addr);
2110 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2111 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2114 if (!is_index_valid(mvdev, idx))
2117 if (is_ctrl_vq_idx(mvdev, idx)) {
2118 mvdev->cvq.desc_addr = desc_area;
2119 mvdev->cvq.device_addr = device_area;
2120 mvdev->cvq.driver_addr = driver_area;
2133 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2134 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2137 if (!is_index_valid(mvdev, idx))
2140 if (is_ctrl_vq_idx(mvdev, idx)) {
2141 struct mlx5_control_vq *cvq = &mvdev->cvq;
2153 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2154 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2157 if (is_ctrl_vq_idx(mvdev, idx))
2158 mvdev->cvq.event_cb = *cb;
2171 static void set_cvq_ready(struct mlx5_vdpa_dev *mvdev, bool ready)
2173 struct mlx5_control_vq *cvq = &mvdev->cvq;
2184 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2185 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2189 if (!mvdev->actual_features)
2192 if (!is_index_valid(mvdev, idx))
2195 if (is_ctrl_vq_idx(mvdev, idx)) {
2196 set_cvq_ready(mvdev, ready);
2206 mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
2217 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2218 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2220 if (!is_index_valid(mvdev, idx))
2223 if (is_ctrl_vq_idx(mvdev, idx))
2224 return mvdev->cvq.ready;
2232 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2233 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2236 if (!is_index_valid(mvdev, idx))
2239 if (is_ctrl_vq_idx(mvdev, idx)) {
2240 mvdev->cvq.vring.last_avail_idx = state->split.avail_index;
2246 mlx5_vdpa_warn(mvdev, "can't modify available index\n");
2257 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2258 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2263 if (!is_index_valid(mvdev, idx))
2266 if (is_ctrl_vq_idx(mvdev, idx)) {
2267 state->split.avail_index = mvdev->cvq.vring.last_avail_idx;
2287 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
2301 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2303 if (is_ctrl_vq_idx(mvdev, idx))
2358 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2359 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2361 print_features(mvdev, ndev->mvdev.mlx_features, false);
2362 return ndev->mvdev.mlx_features;
2365 static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
2387 static int setup_virtqueues(struct mlx5_vdpa_dev *mvdev)
2389 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2393 for (i = 0; i < mvdev->max_vqs; i++) {
2413 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
2422 static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
2424 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_CTRL_VQ)) {
2425 if (MLX5_FEATURE(mvdev, VIRTIO_NET_F_MQ)) {
2427 mvdev->max_idx = mvdev->max_vqs;
2432 mvdev->max_idx = 2;
2436 mvdev->max_idx = 1;
2459 static bool get_link_state(struct mlx5_vdpa_dev *mvdev)
2461 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) ==
2471 struct mlx5_vdpa_dev *mvdev;
2475 mvdev = wqent->mvdev;
2476 ndev = to_mlx5_vdpa_ndev(mvdev);
2477 if (get_link_state(mvdev))
2478 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
2480 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
2496 wqent->mvdev = &ndev->mvdev;
2498 queue_work(ndev->mvdev.wq, &wqent->work);
2527 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS)))
2531 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb);
2542 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb);
2543 if (ndev->mvdev.wq)
2544 flush_workqueue(ndev->mvdev.wq);
2549 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2550 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2553 print_features(mvdev, features, true);
2555 err = verify_driver_features(mvdev, features);
2559 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
2560 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
2561 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
2575 update_cvq_info(mvdev);
2581 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2582 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2605 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2606 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2608 print_status(mvdev, ndev->mvdev.status, false);
2609 return ndev->mvdev.status;
2640 for (i = 0; i < ndev->mvdev.max_vqs; i++) {
2651 for (i = 0; i < ndev->mvdev.max_vqs; i++)
2663 for (i = 0; i < ndev->mvdev.max_vqs; i++) {
2680 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
2683 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2692 mlx5_vdpa_destroy_mr_asid(mvdev, asid);
2693 err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
2697 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
2701 err = setup_driver(mvdev);
2708 mlx5_vdpa_destroy_mr_asid(mvdev, asid);
2714 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
2716 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2722 mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
2732 err = setup_virtqueues(mvdev);
2734 mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
2740 mlx5_vdpa_warn(mvdev, "create_rqt\n");
2746 mlx5_vdpa_warn(mvdev, "create_tir\n");
2752 mlx5_vdpa_warn(mvdev, "setup_steering\n");
2792 for (i = 0; i < ndev->mvdev.max_vqs; i++)
2795 ndev->mvdev.cvq.ready = false;
2798 static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
2800 struct mlx5_control_vq *cvq = &mvdev->cvq;
2803 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
2806 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
2820 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2821 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2824 print_status(mvdev, status, true);
2828 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
2830 err = setup_cvq_vring(mvdev);
2832 mlx5_vdpa_warn(mvdev, "failed to setup control VQ vring\n");
2836 err = setup_driver(mvdev);
2838 mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
2842 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
2847 ndev->mvdev.status = status;
2854 mlx5_vdpa_destroy_mr(&ndev->mvdev);
2855 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
2860 static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
2866 mvdev->group2asid[i] = 0;
2871 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2872 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2874 print_status(mvdev, 0, true);
2875 mlx5_vdpa_info(mvdev, "performing device reset\n");
2881 mlx5_vdpa_destroy_mr(&ndev->mvdev);
2882 ndev->mvdev.status = 0;
2883 ndev->mvdev.suspended = false;
2885 ndev->mvdev.cvq.received_desc = 0;
2886 ndev->mvdev.cvq.completed_desc = 0;
2887 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
2888 ndev->mvdev.actual_features = 0;
2889 init_group_to_asid_map(mvdev);
2890 ++mvdev->generation;
2892 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
2893 if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
2894 mlx5_vdpa_warn(mvdev, "create MR failed\n");
2909 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2910 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2924 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2926 return mvdev->generation;
2929 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
2935 err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
2937 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
2942 err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
2950 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2951 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
2955 err = set_map_data(mvdev, iotlb, asid);
2962 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2964 if (is_ctrl_vq_idx(mvdev, idx))
2967 return mvdev->vdev.dma_dev;
2975 if (!msix_mode_supported(&ndev->mvdev))
2984 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map);
2991 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
2995 ndev = to_mlx5_vdpa_ndev(mvdev);
2998 mlx5_vdpa_destroy_mr(mvdev);
3000 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
3003 mlx5_vdpa_free_resources(&ndev->mvdev);
3011 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3016 if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
3023 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT)
3026 ndev = to_mlx5_vdpa_ndev(mvdev);
3027 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr;
3035 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3036 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
3039 if (!is_index_valid(mvdev, idx))
3042 if (is_ctrl_vq_idx(mvdev, idx))
3054 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3056 return mvdev->actual_features;
3068 if (!counters_supported(&ndev->mvdev))
3078 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
3081 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out));
3095 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3096 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
3104 if (!is_index_valid(mvdev, idx)) {
3110 if (idx == ctrl_vq_idx(mvdev)) {
3111 cvq = &mvdev->cvq;
3146 static void mlx5_vdpa_cvq_suspend(struct mlx5_vdpa_dev *mvdev)
3150 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
3153 cvq = &mvdev->cvq;
3159 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3160 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
3164 mlx5_vdpa_info(mvdev, "suspending device\n");
3172 mlx5_vdpa_cvq_suspend(mvdev);
3173 mvdev->suspended = true;
3181 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3186 mvdev->group2asid[group] = asid;
3244 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
3248 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
3261 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
3273 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
3282 for (i = 0; i < ndev->mvdev.max_vqs; ++i) {
3290 for (; i < ndev->mvdev.max_vqs; i++) {
3331 if (!msix_mode_supported(&ndev->mvdev))
3334 if (!ndev->mvdev.mdev->pdev)
3337 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL);
3342 for (i = 0; i < ndev->mvdev.max_vqs; i++) {
3345 dev_name(&ndev->mvdev.vdev.dev), i);
3346 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL);
3360 struct mlx5_vdpa_dev *mvdev;
3415 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
3420 ndev->mvdev.max_vqs = max_vqs;
3421 mvdev = &ndev->mvdev;
3422 mvdev->mdev = mdev;
3447 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
3451 if (get_link_state(mvdev))
3452 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
3454 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
3482 mlx5_vdpa_warn(&ndev->mvdev,
3489 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2);
3491 ndev->mvdev.mlx_features = device_features;
3492 mvdev->vdev.dma_dev = &mdev->pdev->dev;
3493 err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
3497 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
3498 err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
3507 ndev->cvq_ent.mvdev = mvdev;
3509 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
3510 if (!mvdev->wq) {
3515 mvdev->vdev.mdev = &mgtdev->mgtdev;
3516 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1);
3524 destroy_workqueue(mvdev->wq);
3528 mlx5_vdpa_destroy_mr(mvdev);
3530 mlx5_vdpa_free_resources(&ndev->mvdev);
3535 put_device(&mvdev->vdev.dev);
3542 struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
3543 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
3548 wq = mvdev->wq;
3549 mvdev->wq = NULL;
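
The helpers that recur throughout the listing (is_index_valid, ctrl_vq_idx, is_ctrl_vq_idx, update_cvq_info, mlx5_vdpa_is_little_endian) are small enough to model on their own. The following is a minimal userspace sketch of that logic, not driver code: the struct model_vdpa_dev, its field names, and the hard-coded bounds for the non-MQ case (control VQ at index 2, data pair 0 at indexes 0 and 1) are assumptions based on the standard virtio-net queue layout, filled in where the matching lines above only show the surrounding branches.

/*
 * Minimal userspace model of the mvdev index/feature helpers seen above.
 * Not driver code: model_vdpa_dev is a stand-in carrying only the fields
 * these helpers touch (assumed names, not the kernel struct).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/virtio_config.h>	/* VIRTIO_F_VERSION_1 */
#include <linux/virtio_net.h>		/* VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_VQ */

#define BIT_ULL(nr) (1ULL << (nr))

struct model_vdpa_dev {
	uint64_t actual_features;	/* feature bits negotiated with the guest driver */
	uint16_t max_vqs;		/* number of data virtqueues (2 per queue pair) */
	uint16_t max_idx;		/* highest valid virtqueue index */
};

/* Legacy virtio uses guest-native endianness; VERSION_1 mandates little endian. */
static bool model_is_little_endian(const struct model_vdpa_dev *mvdev)
{
	return mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1);
}

/*
 * Control VQ index: with MQ it sits right after the last data virtqueue,
 * otherwise at index 2 (rx = 0, tx = 1). The constant 2 is an assumption
 * from the virtio-net layout; the listing only shows the MQ branch.
 */
static uint16_t model_ctrl_vq_idx(const struct model_vdpa_dev *mvdev)
{
	if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
		return 2;
	return mvdev->max_vqs;
}

static bool model_is_ctrl_vq_idx(const struct model_vdpa_dev *mvdev, uint16_t idx)
{
	return idx == model_ctrl_vq_idx(mvdev);
}

/* Recompute the highest valid index whenever the negotiated features change. */
static void model_update_cvq_info(struct model_vdpa_dev *mvdev)
{
	if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
		if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
			mvdev->max_idx = mvdev->max_vqs;	/* CVQ follows the data VQs */
		else
			mvdev->max_idx = 2;			/* rx, tx, then CVQ */
	} else {
		mvdev->max_idx = 1;				/* rx and tx only */
	}
}

static bool model_is_index_valid(const struct model_vdpa_dev *mvdev, uint16_t idx)
{
	/* Without MQ, only rx/tx (and the CVQ, if negotiated) are addressable. */
	if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
		if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
			return idx < 2;
		return idx < 3;
	}
	return idx <= mvdev->max_idx;
}

int main(void)
{
	struct model_vdpa_dev mvdev = {
		.actual_features = BIT_ULL(VIRTIO_F_VERSION_1) |
				   BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
				   BIT_ULL(VIRTIO_NET_F_MQ),
		.max_vqs = 8,	/* four data queue pairs */
	};

	model_update_cvq_info(&mvdev);
	printf("little endian:      %d\n", model_is_little_endian(&mvdev));
	printf("ctrl vq index:      %d\n", model_ctrl_vq_idx(&mvdev));
	printf("index 8 is ctrl vq: %d\n", model_is_ctrl_vq_idx(&mvdev, 8));
	printf("index 9 is valid:   %d\n", model_is_index_valid(&mvdev, 9));
	return 0;
}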