Lines matching defs:mvdev in drivers/vfio/pci/mlx5/main.c (mlx5 vfio-pci live-migration driver)

392 struct mlx5vf_pci_core_device *mvdev = migf->mvdev;
412 mutex_lock(&mvdev->state_mutex);
413 if (mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
414 mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
424 if (mvdev->mig_state == VFIO_DEVICE_STATE_PRE_COPY) {
430 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &inc_length,
468 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, true);
476 mlx5vf_state_mutex_unlock(mvdev);
484 mlx5vf_state_mutex_unlock(mvdev);
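
The matches at 412-484 show the file's locking discipline: state_mutex is taken with a plain mutex_lock(), but every exit path releases it through mlx5vf_state_mutex_unlock() rather than mutex_unlock(), so a reset that raced with the ioctl is handled on the way out (see lines 1144-1156 below). A minimal sketch of that shape; the error label and the elided body are assumptions:

    mutex_lock(&mvdev->state_mutex);
    if (mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY &&
        mvdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
            ret = -EINVAL;
            goto err_state_unlock;            /* label name is an assumption */
    }
    /* ... query the VHCA state and save the incremental data ... */
    mlx5vf_state_mutex_unlock(mvdev);         /* deliberately not mutex_unlock() */
    return 0;

    err_state_unlock:
    mlx5vf_state_mutex_unlock(mvdev);
    return ret;
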
498 static int mlx5vf_pci_save_device_inc_data(struct mlx5vf_pci_core_device *mvdev)
500 struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
508 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length,
525 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, false);
539 mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
557 migf->mvdev = mvdev;
572 mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
577 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, &length, 0);
593 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, track);
838 mutex_lock(&migf->mvdev->state_mutex);
917 ret = mlx5vf_cmd_load_vhca_state(migf->mvdev, migf, vhca_buf);
937 mlx5vf_state_mutex_unlock(migf->mvdev);
949 mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
966 migf->mvdev = mvdev;
978 if (MLX5VF_PRE_COPY_SUPP(mvdev)) {
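
MLX5VF_PRE_COPY_SUPP() (used at 978 and again at 1100) gates the pre-copy paths. Assuming the usual definition in the driver's cmd.h, it is just a flag test on the core vfio device:

    /* assumed definition, consistent with how the flag is used above */
    #define MLX5VF_PRE_COPY_SUPP(mvdev) \
            ((mvdev)->core_device.vdev.migration_flags & VFIO_MIGRATION_PRE_COPY)
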
1010 void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
1012 if (mvdev->resuming_migf) {
1013 mlx5vf_disable_fd(mvdev->resuming_migf);
1014 mlx5fv_cmd_clean_migf_resources(mvdev->resuming_migf);
1015 fput(mvdev->resuming_migf->filp);
1016 mvdev->resuming_migf = NULL;
1018 if (mvdev->saving_migf) {
1019 mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
1020 cancel_work_sync(&mvdev->saving_migf->async_data.work);
1021 mlx5vf_disable_fd(mvdev->saving_migf);
1022 mlx5fv_cmd_clean_migf_resources(mvdev->saving_migf);
1023 fput(mvdev->saving_migf->filp);
1024 mvdev->saving_migf = NULL;
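
Lines 1010-1024 are nearly the whole of mlx5vf_disable_fds(), and the ordering on the saving side matters: the async command context must be drained before the completion work is cancelled, and both before the file itself is torn down. Reassembled with comments added (the statements themselves are exactly the matches above):

    void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev)
    {
            if (mvdev->resuming_migf) {
                    mlx5vf_disable_fd(mvdev->resuming_migf);
                    mlx5fv_cmd_clean_migf_resources(mvdev->resuming_migf);
                    fput(mvdev->resuming_migf->filp);
                    mvdev->resuming_migf = NULL;
            }
            if (mvdev->saving_migf) {
                    /* wait for in-flight async FW commands to complete */
                    mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx);
                    /* then make sure the completion work is not still queued */
                    cancel_work_sync(&mvdev->saving_migf->async_data.work);
                    mlx5vf_disable_fd(mvdev->saving_migf);
                    mlx5fv_cmd_clean_migf_resources(mvdev->saving_migf);
                    fput(mvdev->saving_migf->filp);
                    mvdev->saving_migf = NULL;
            }
    }
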
1029 mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
1032 u32 cur = mvdev->mig_state;
1036 ret = mlx5vf_cmd_suspend_vhca(mvdev,
1044 ret = mlx5vf_cmd_resume_vhca(mvdev,
1053 ret = mlx5vf_cmd_suspend_vhca(mvdev,
1062 ret = mlx5vf_cmd_resume_vhca(mvdev,
1072 migf = mlx5vf_pci_save_device_data(mvdev, false);
1076 mvdev->saving_migf = migf;
1084 mlx5vf_disable_fds(mvdev);
1091 migf = mlx5vf_pci_resume_device_data(mvdev);
1095 mvdev->resuming_migf = migf;
1100 if (!MLX5VF_PRE_COPY_SUPP(mvdev)) {
1101 ret = mlx5vf_cmd_load_vhca_state(mvdev,
1102 mvdev->resuming_migf,
1103 mvdev->resuming_migf->buf);
1107 mlx5vf_disable_fds(mvdev);
1116 migf = mlx5vf_pci_save_device_data(mvdev, true);
1120 mvdev->saving_migf = migf;
1125 ret = mlx5vf_cmd_suspend_vhca(mvdev,
1129 ret = mlx5vf_pci_save_device_inc_data(mvdev);
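
Lines 1029-1129 are the arc table of mlx5vf_pci_step_device_state_locked(): each legal single-step transition is one cur/new comparison followed by one firmware command or migration-file operation. A sketch of the first two arcs; the op_mod constant names are assumptions based on the mlx5 suspend/resume command interface:

    u32 cur = mvdev->mig_state;
    int ret;

    if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
            ret = mlx5vf_cmd_suspend_vhca(mvdev,
                    MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
            if (ret)
                    return ERR_PTR(ret);
            return NULL;    /* this arc produces no file */
    }

    if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
            ret = mlx5vf_cmd_resume_vhca(mvdev,
                    MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER);
            if (ret)
                    return ERR_PTR(ret);
            return NULL;
    }
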
1144 void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
1147 spin_lock(&mvdev->reset_lock);
1148 if (mvdev->deferred_reset) {
1149 mvdev->deferred_reset = false;
1150 spin_unlock(&mvdev->reset_lock);
1151 mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
1152 mlx5vf_disable_fds(mvdev);
1155 mutex_unlock(&mvdev->state_mutex);
1156 spin_unlock(&mvdev->reset_lock);
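
Lines 1144-1156 are the release side of the deferred-reset protocol. The listing shows only the lines mentioning mvdev; filling in the two missing control-flow lines (the again label and the goto are assumptions, though the re-check is forced by the logic, since a reset may arrive while the fds are being disabled):

    void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
    {
    again:
            spin_lock(&mvdev->reset_lock);
            if (mvdev->deferred_reset) {
                    mvdev->deferred_reset = false;
                    spin_unlock(&mvdev->reset_lock);
                    mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
                    mlx5vf_disable_fds(mvdev);
                    goto again;
            }
            mutex_unlock(&mvdev->state_mutex);
            spin_unlock(&mvdev->reset_lock);
    }
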
1163 struct mlx5vf_pci_core_device *mvdev = container_of(
1169 mutex_lock(&mvdev->state_mutex);
1170 while (new_state != mvdev->mig_state) {
1171 ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
1177 res = mlx5vf_pci_step_device_state_locked(mvdev, next_state);
1180 mvdev->mig_state = next_state;
1181 if (WARN_ON(res && new_state != mvdev->mig_state)) {
1187 mlx5vf_state_mutex_unlock(mvdev);
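
Lines 1163-1187 step toward the requested state one arc at a time: the vfio core picks the next hop with vfio_mig_get_next_state(), the driver executes it, and a returned file before the final state is reached is a driver bug (hence the WARN_ON). Reconstructed around the matches; the error handling between the shown lines is an assumption:

    mutex_lock(&mvdev->state_mutex);
    while (new_state != mvdev->mig_state) {
            ret = vfio_mig_get_next_state(vdev, mvdev->mig_state,
                                          new_state, &next_state);
            if (ret) {
                    res = ERR_PTR(ret);
                    break;
            }
            res = mlx5vf_pci_step_device_state_locked(mvdev, next_state);
            if (IS_ERR(res))
                    break;
            mvdev->mig_state = next_state;
            if (WARN_ON(res && new_state != mvdev->mig_state)) {
                    fput(res);
                    res = ERR_PTR(-EINVAL);
                    break;
            }
    }
    mlx5vf_state_mutex_unlock(mvdev);
    return res;
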
1194 struct mlx5vf_pci_core_device *mvdev = container_of(
1199 mutex_lock(&mvdev->state_mutex);
1200 ret = mlx5vf_cmd_query_vhca_migration_state(mvdev,
1204 mlx5vf_state_mutex_unlock(mvdev);
1211 struct mlx5vf_pci_core_device *mvdev = container_of(
1214 mutex_lock(&mvdev->state_mutex);
1215 *curr_state = mvdev->mig_state;
1216 mlx5vf_state_mutex_unlock(mvdev);
1222 struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
1224 if (!mvdev->migrate_cap)
1234 spin_lock(&mvdev->reset_lock);
1235 mvdev->deferred_reset = true;
1236 if (!mutex_trylock(&mvdev->state_mutex)) {
1237 spin_unlock(&mvdev->reset_lock);
1240 spin_unlock(&mvdev->reset_lock);
1241 mlx5vf_state_mutex_unlock(mvdev);
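
Lines 1234-1241 are the arming side of the same protocol: the reset handler cannot simply block on state_mutex (the vfio core holds locks across reset, so sleeping here risks deadlock), so it raises deferred_reset under the spinlock and performs the cleanup itself only if a trylock succeeds; otherwise the current mutex holder picks the flag up in mlx5vf_state_mutex_unlock(). Reconstructed:

    spin_lock(&mvdev->reset_lock);
    mvdev->deferred_reset = true;
    if (!mutex_trylock(&mvdev->state_mutex)) {
            /* another context holds state_mutex; it will act on the flag */
            spin_unlock(&mvdev->reset_lock);
            return;
    }
    spin_unlock(&mvdev->reset_lock);
    /* we own the mutex: release via the path that honors deferred_reset */
    mlx5vf_state_mutex_unlock(mvdev);
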
1246 struct mlx5vf_pci_core_device *mvdev = container_of(
1248 struct vfio_pci_core_device *vdev = &mvdev->core_device;
1255 if (mvdev->migrate_cap)
1256 mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
1263 struct mlx5vf_pci_core_device *mvdev = container_of(
1266 mlx5vf_cmd_close_migratable(mvdev);
1284 struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
1292 mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops,
1300 struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev,
1303 mlx5vf_cmd_remove_migratable(mvdev);
1329 struct mlx5vf_pci_core_device *mvdev;
1332 mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev,
1334 if (IS_ERR(mvdev))
1335 return PTR_ERR(mvdev);
1337 dev_set_drvdata(&pdev->dev, &mvdev->core_device);
1338 ret = vfio_pci_core_register_device(&mvdev->core_device);
1344 vfio_put_device(&mvdev->core_device.vdev);
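
Lines 1329-1344 show the standard vfio variant-driver probe shape: allocate the embedding device with vfio_alloc_device(), publish drvdata, register, and drop the reference on failure. Sketch; the ops symbol and error label are assumptions:

    mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev,
                              &pdev->dev, &mlx5vf_pci_ops);
    if (IS_ERR(mvdev))
            return PTR_ERR(mvdev);

    dev_set_drvdata(&pdev->dev, &mvdev->core_device);
    ret = vfio_pci_core_register_device(&mvdev->core_device);
    if (ret)
            goto out_put_vdev;
    return 0;

    out_put_vdev:
    vfio_put_device(&mvdev->core_device.vdev);
    return ret;
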
1350 struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev);
1352 vfio_pci_core_unregister_device(&mvdev->core_device);
1353 vfio_put_device(&mvdev->core_device.vdev);