Lines Matching defs:mvdev
129 struct mlx5_vdpa_dev mvdev;
156 mlx5_vdpa_info(mvdev, "%s\n", #_feature); \
162 mlx5_vdpa_info(mvdev, "%s\n", #_status); \
165 static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set)
168 mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n",
174 mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get");
176 mlx5_vdpa_info(mvdev, "driver resets the device\n");
188 static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set)
191 mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n",
197 mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? "sets" : "reads");
199 mlx5_vdpa_info(mvdev, "all feature bits are cleared\n");
239 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
246 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn);
248 mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err);
255 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn);
268 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,
269 ndev->mvdev.mdev->priv.numa_node);
285 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf,
286 ndev->mvdev.mdev->priv.numa_node);
291 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf);
337 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid);
351 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
353 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index);
365 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev,
367 ndev->mvdev.mdev->priv.numa_node);
372 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf);
378 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
391 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db);
407 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn);
417 vqp->mqp.uid = ndev->mvdev.res.uid;
427 mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
441 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid);
442 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in))
443 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn);
445 mlx5_db_free(ndev->mvdev.mdev, &vqp->db);
484 void __iomem *uar_page = ndev->mvdev.res.uar->map;
510 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
511 void __iomem *uar_page = ndev->mvdev.res.uar->map;
543 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid);
559 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index);
580 mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
587 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
591 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn);
595 mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
601 struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
627 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf);
654 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid);
662 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
664 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err);
700 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)))
737 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
785 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
791 get_features_12_3(ndev->mvdev.actual_features));
803 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
807 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
814 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
816 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
840 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
843 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
844 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
876 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid);
888 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid);
905 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid);
923 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid);
967 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen);
1028 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1029 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen);
1064 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
1070 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
1087 mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n");
1114 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
1143 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
1146 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n");
1184 log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
1193 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid);
1200 for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) {
1210 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
1220 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn);
1243 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid);
1260 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn);
1267 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
1282 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
1284 mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
1292 ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
1313 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
1325 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
1333 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1334 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1340 iowrite16(idx, ndev->mvdev.res.kick_addr);
1346 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1347 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1358 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1359 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1368 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1369 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1377 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1378 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1389 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1390 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1399 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1400 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1404 mlx5_vdpa_warn(mvdev, "can't modify available index\n");
1415 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1416 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1436 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
1472 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1473 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1476 dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
1477 ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features);
1478 if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
1479 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
1480 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
1481 print_features(mvdev, ndev->mvdev.mlx_features, false);
1482 return ndev->mvdev.mlx_features;
1485 static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features)
1512 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) {
1532 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
1542 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
1545 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
1548 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
1550 return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
1555 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1556 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1559 print_features(mvdev, features, true);
1561 err = verify_driver_features(mvdev, features);
1565 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
1566 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
1567 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
1595 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1596 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1598 print_status(mvdev, ndev->mvdev.status, false);
1599 return ndev->mvdev.status;
1631 for (i = 0; i < ndev->mvdev.max_vqs; i++) {
1642 for (i = 0; i < ndev->mvdev.max_vqs; i++)
1654 for (i = 0; i < ndev->mvdev.max_vqs; i++) {
1681 mlx5_vdpa_destroy_mr(&ndev->mvdev);
1682 err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb);
1686 if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
1697 mlx5_vdpa_destroy_mr(&ndev->mvdev);
1708 mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n");
1714 mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n");
1720 mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n");
1726 mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n");
1732 mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n");
1770 for (i = 0; i < ndev->mvdev.max_vqs; i++)
1776 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1777 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1780 print_status(mvdev, status, true);
1782 mlx5_vdpa_info(mvdev, "performing device reset\n");
1785 mlx5_vdpa_destroy_mr(&ndev->mvdev);
1786 ndev->mvdev.status = 0;
1787 ndev->mvdev.mlx_features = 0;
1788 ++mvdev->generation;
1792 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
1796 mlx5_vdpa_warn(mvdev, "failed to setup driver\n");
1800 mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
1805 ndev->mvdev.status = status;
1809 mlx5_vdpa_destroy_mr(&ndev->mvdev);
1810 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
1816 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1817 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1831 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1833 return mvdev->generation;
1838 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1839 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1843 err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
1845 mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
1857 struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
1861 ndev = to_mlx5_vdpa_ndev(mvdev);
1865 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
1868 mlx5_vdpa_free_resources(&ndev->mvdev);
1930 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n");
1934 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn);
1947 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
1959 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn);
1968 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) {
1975 for (; i < ndev->mvdev.max_vqs; i++) {
1987 struct mlx5_vdpa_dev *mvdev;
1996 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
2001 ndev->mvdev.max_vqs = max_vqs;
2002 mvdev = &ndev->mvdev;
2003 mvdev->mdev = mdev;
2022 mvdev->vdev.dma_dev = mdev->device;
2023 err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
2031 err = vdpa_register_device(&mvdev->vdev);
2040 mlx5_vdpa_free_resources(&ndev->mvdev);
2046 put_device(&mvdev->vdev.dev);
2050 void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev)
2052 vdpa_unregister_device(&mvdev->vdev);
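
Most of the hits above follow one of two access patterns: direct member access through the embedding net device (ndev->mvdev.<field>) or conversion from the generic struct vdpa_device back to the mlx5 wrappers via to_mvdev()/to_mlx5_vdpa_ndev(). The following is a minimal sketch of that embedding and the container_of() accessors, assuming the layout implied by lines 129, 1333-1334 and 1996 above; fields other than mvdev, vdev and mdev are abbreviated for illustration.

    #include <linux/kernel.h>	/* container_of() */
    #include <linux/vdpa.h>	/* struct vdpa_device */

    /* Abbreviated sketch: mlx5_vdpa_net embeds mlx5_vdpa_dev (line 129 above),
     * which in turn embeds the generic vdpa_device handed to the vdpa core. */
    struct mlx5_vdpa_dev {
    	struct vdpa_device vdev;	/* registered with the vdpa bus */
    	struct mlx5_core_dev *mdev;	/* owning mlx5 function, seen as mvdev->mdev above */
    	/* ... uid, pdn, uar and other resources elided ... */
    };

    struct mlx5_vdpa_net {
    	struct mlx5_vdpa_dev mvdev;	/* the member matched by defs:mvdev */
    	/* ... net-specific state elided ... */
    };

    /* vdpa callbacks receive a struct vdpa_device * and recover the wrappers
     * with container_of(); this is what the to_*() conversions above amount to. */
    #define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev)
    #define to_mlx5_vdpa_ndev(__mvdev) \
    	container_of((__mvdev), struct mlx5_vdpa_net, mvdev)

This embedding is why the same state appears both as ndev->mvdev.res.uid (when code already holds the net device) and as mvdev->generation or mvdev->mdev (when code starts from the vdpa_device passed in by the core).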