Lines Matching defs:mvdev

Cross-reference of every definition and dereference of mvdev (a struct mlx5vf_pci_core_device pointer) in the mlx5 vfio live-migration driver; the function names correspond to drivers/vfio/pci/mlx5/cmd.c, and the leading number on each match is that line's position in the source file.
36 _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
38 int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
40 struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
45 lockdep_assert_held(&mvdev->state_mutex);
46 if (mvdev->mdev_detach)
62 MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
65 err = mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
72 int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
77 lockdep_assert_held(&mvdev->state_mutex);
78 if (mvdev->mdev_detach)
82 MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
85 return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
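The suspend and resume matches (lines 38-85) share one command-wrapper shape: assert that the caller holds state_mutex, refuse to touch firmware once the PF core device has detached, build the mailbox with MLX5_SET(), and fire it with mlx5_cmd_exec_inout(). A condensed reconstruction of the resume side (opcode names are the standard mlx5_ifc ones; the suspend variant additionally waits on saving_migf->save_comp, per line 40, so it never races an in-flight save):

    int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
    {
            u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
            u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};

            lockdep_assert_held(&mvdev->state_mutex);  /* caller owns the state machine */
            if (mvdev->mdev_detach)                    /* PF mlx5_core device is gone */
                    return -ENOTCONN;

            MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
            MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
            MLX5_SET(resume_vhca_in, in, op_mod, op_mod);

            return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
    }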
88 int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
96 lockdep_assert_held(&mvdev->state_mutex);
97 if (mvdev->mdev_detach)
107 ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp);
110 if (mvdev->saving_migf->state ==
118 complete(&mvdev->saving_migf->save_comp);
127 MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
132 ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
135 complete(&mvdev->saving_migf->save_comp);
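The query matches (lines 88-135) add pre-copy serialization around that same wrapper: take saving_migf->save_comp so a concurrent save command cannot run, bail out if the saving file already hit a pre-copy error, and re-complete save_comp on every exit path. Roughly as follows (a sketch of the incremental-query branch; the MLX5_MIGF_STATE_PRE_COPY_ERROR state name is my reading of the driver):

    ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp);
    if (ret)
            return ret;
    if (mvdev->saving_migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR) {
            /* saving side already failed: release waiters and give up */
            complete(&mvdev->saving_migf->save_comp);
            return -ENODEV;
    }

    MLX5_SET(query_vhca_migration_state_in, in, opcode,
             MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
    MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
    ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
                              out);
    complete(&mvdev->saving_migf->save_comp);  /* release on success too */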
145 static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
148 mvdev->tracker.is_err = true;
149 complete(&mvdev->tracker_comp);
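Lines 145-149 are the entire error helper: it latches is_err on the tracker and kicks tracker_comp, so a reader blocked in the read-and-clear path (line 1605 below) wakes and observes the failure instead of sleeping forever:

    static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
    {
            /* mark the tracker failed and wake any waiter on tracker_comp */
            mvdev->tracker.is_err = true;
            complete(&mvdev->tracker_comp);
    }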
155 struct mlx5vf_pci_core_device *mvdev =
160 mutex_lock(&mvdev->state_mutex);
161 mvdev->mdev_detach = false;
162 mlx5vf_state_mutex_unlock(mvdev);
165 mlx5vf_cmd_close_migratable(mvdev);
166 mutex_lock(&mvdev->state_mutex);
167 mvdev->mdev_detach = true;
168 mlx5vf_state_mutex_unlock(mvdev);
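Lines 155-168 come from mlx5fv_vf_event (registered at line 235), the SR-IOV blocking-notifier callback that flips mdev_detach under state_mutex. Note the ordering on teardown: mlx5vf_cmd_close_migratable() runs before mdev_detach is set, while the PF device is still reachable. A sketch, assuming the mlx5 PF enable/disable VF notification events:

    static int mlx5fv_vf_event(struct notifier_block *nb,
                               unsigned long event, void *data)
    {
            struct mlx5vf_pci_core_device *mvdev =
                    container_of(nb, struct mlx5vf_pci_core_device, nb);

            switch (event) {
            case MLX5_PF_NOTIFY_ENABLE_VF:
                    mutex_lock(&mvdev->state_mutex);
                    mvdev->mdev_detach = false;  /* PF is back: allow commands */
                    mlx5vf_state_mutex_unlock(mvdev);
                    break;
            case MLX5_PF_NOTIFY_DISABLE_VF:
                    mlx5vf_cmd_close_migratable(mvdev);  /* quiesce first */
                    mutex_lock(&mvdev->state_mutex);
                    mvdev->mdev_detach = true;   /* then fence future commands */
                    mlx5vf_state_mutex_unlock(mvdev);
                    break;
            default:
                    break;
            }
            return 0;
    }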
177 void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
179 if (!mvdev->migrate_cap)
183 set_tracker_error(mvdev);
184 mutex_lock(&mvdev->state_mutex);
185 mlx5vf_disable_fds(mvdev);
186 _mlx5vf_free_page_tracker_resources(mvdev);
187 mlx5vf_state_mutex_unlock(mvdev);
190 void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
192 if (!mvdev->migrate_cap)
195 mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
196 &mvdev->nb);
197 destroy_workqueue(mvdev->cb_wq);
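Lines 177-197 split teardown in two: close_migratable quiesces (fail the dirty tracker so waiters wake, close the migration FDs, free tracker resources, all under state_mutex), and remove_migratable undoes the probe-time registration. Both are gated on migrate_cap, so non-migratable VFs are no-ops. Reconstructed almost verbatim from the matches:

    void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
    {
            if (!mvdev->migrate_cap)
                    return;

            /* outside the lock, so a blocked waiter can see the error and exit */
            set_tracker_error(mvdev);
            mutex_lock(&mvdev->state_mutex);
            mlx5vf_disable_fds(mvdev);
            _mlx5vf_free_page_tracker_resources(mvdev);
            mlx5vf_state_mutex_unlock(mvdev);
    }

    void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
    {
            if (!mvdev->migrate_cap)
                    return;

            mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
                                                    &mvdev->nb);
            destroy_workqueue(mvdev->cb_wq);
    }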
200 void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
204 struct pci_dev *pdev = mvdev->core_device.pdev;
210 mvdev->mdev = mlx5_vf_get_core_dev(pdev);
211 if (!mvdev->mdev)
214 if (!MLX5_CAP_GEN(mvdev->mdev, migration))
217 mvdev->vf_id = pci_iov_vf_id(pdev);
218 if (mvdev->vf_id < 0)
221 ret = mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1);
225 if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
226 &mvdev->vhca_id))
229 mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
230 if (!mvdev->cb_wq)
233 mutex_init(&mvdev->state_mutex);
234 spin_lock_init(&mvdev->reset_lock);
235 mvdev->nb.notifier_call = mlx5fv_vf_event;
236 ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
237 &mvdev->nb);
239 destroy_workqueue(mvdev->cb_wq);
243 mvdev->migrate_cap = 1;
244 mvdev->core_device.vdev.migration_flags =
247 mvdev->core_device.vdev.mig_ops = mig_ops;
248 init_completion(&mvdev->tracker_comp);
249 if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
250 mvdev->core_device.vdev.log_ops = log_ops;
252 if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
253 MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state))
254 mvdev->core_device.vdev.migration_flags |=
258 mlx5_vf_put_core_dev(mvdev->mdev);
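Lines 200-258 are the probe-time setup, and the ordering is the point: pin the PF core device, gate on firmware capabilities, resolve vf_id and vhca_id, create the ordered callback workqueue, register for PF notifications, and only then advertise migrate_cap and the vfio ops. A condensed skeleton; the VFIO_MIGRATION_* flag names and the single shared exit label are my reading of the truncated matches, and per-step error unwinding is abbreviated:

    void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
                                   const struct vfio_migration_ops *mig_ops,
                                   const struct vfio_log_ops *log_ops)
    {
            struct pci_dev *pdev = mvdev->core_device.pdev;

            mvdev->mdev = mlx5_vf_get_core_dev(pdev);   /* pin the PF driver */
            if (!mvdev->mdev)
                    return;
            if (!MLX5_CAP_GEN(mvdev->mdev, migration))  /* FW capability gate */
                    goto end;
            mvdev->vf_id = pci_iov_vf_id(pdev);
            if (mvdev->vf_id < 0)
                    goto end;
            if (mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1) ||
                mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
                                       &mvdev->vhca_id))
                    goto end;

            mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
            if (!mvdev->cb_wq)
                    goto end;

            mutex_init(&mvdev->state_mutex);
            spin_lock_init(&mvdev->reset_lock);
            mvdev->nb.notifier_call = mlx5fv_vf_event;
            if (mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
                                                      &mvdev->nb)) {
                    destroy_workqueue(mvdev->cb_wq);
                    goto end;
            }

            mvdev->migrate_cap = 1;
            mvdev->core_device.vdev.migration_flags =
                    VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
            mvdev->core_device.vdev.mig_ops = mig_ops;
            init_completion(&mvdev->tracker_comp);
            if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
                    mvdev->core_device.vdev.log_ops = log_ops;  /* dirty tracking */
            if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
                MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state))
                    mvdev->core_device.vdev.migration_flags |=
                            VFIO_MIGRATION_PRE_COPY;
    end:
            /* drop the reference; later access is fenced by mdev_detach */
            mlx5_vf_put_core_dev(mvdev->mdev);
    }

Keeping mvdev->mdev cached after the final put works because every later user re-checks mdev_detach under state_mutex, as the guards throughout this listing show.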
347 struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev;
348 struct mlx5_core_dev *mdev = mvdev->mdev;
351 lockdep_assert_held(&mvdev->state_mutex);
352 if (mvdev->mdev_detach)
379 lockdep_assert_held(&migf->mvdev->state_mutex);
380 WARN_ON(migf->mvdev->mdev_detach);
383 mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
384 dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt,
442 lockdep_assert_held(&migf->mvdev->state_mutex);
443 if (migf->mvdev->mdev_detach)
573 queue_work(migf->mvdev->cb_wq, &async_data->work);
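The buffer helpers (lines 347-443) show the two flavors of the detach guard. Setup paths (DMA-mapping a buffer, allocating one) return -ENOTCONN when the PF has detached; the free path instead WARNs, because the mkey and the DMA mapping can only be torn down while the PF is still attached, so reaching it detached is a bug rather than a condition to tolerate. Line 573 is the third piece: command completions are bounced to the ordered cb_wq workqueue instead of being handled in the async callback context. The guard pair, as it appears in the matches:

    /* setup side: refuse new work once the PF core dev is detached */
    lockdep_assert_held(&migf->mvdev->state_mutex);
    if (migf->mvdev->mdev_detach)
            return -ENOTCONN;

    /* teardown side: freeing must happen while still attached */
    lockdep_assert_held(&migf->mvdev->state_mutex);
    WARN_ON(migf->mvdev->mdev_detach);
    mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
    dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt,
                      buf->dma_dir, 0);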
576 int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
587 lockdep_assert_held(&mvdev->state_mutex);
588 if (mvdev->mdev_detach)
605 MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
620 if (MLX5VF_PRE_COPY_SUPP(mvdev)) {
659 int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
667 lockdep_assert_held(&mvdev->state_mutex);
668 if (mvdev->mdev_detach)
680 MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
683 return mlx5_cmd_exec_inout(mvdev->mdev, load_vhca_state, in, out);
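save (lines 576-620) and load (lines 659-683) reuse the suspend/resume wrapper shape, with the mailbox additionally carrying an mkey that points firmware at the migration data buffer; the save path also branches on MLX5VF_PRE_COPY_SUPP(mvdev) (line 620) for the incremental pre-copy flow. A condensed sketch of the load side (the mkey and size field names are my assumption from the mlx5_ifc layout):

    int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
                                   struct mlx5_vf_migration_file *migf,
                                   struct mlx5_vhca_data_buffer *buf)
    {
            u32 out[MLX5_ST_SZ_DW(load_vhca_state_out)] = {};
            u32 in[MLX5_ST_SZ_DW(load_vhca_state_in)] = {};

            lockdep_assert_held(&mvdev->state_mutex);
            if (mvdev->mdev_detach)
                    return -ENOTCONN;

            MLX5_SET(load_vhca_state_in, in, opcode,
                     MLX5_CMD_OP_LOAD_VHCA_STATE);
            MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
            MLX5_SET(load_vhca_state_in, in, mkey, buf->mkey);  /* state source */
            MLX5_SET(load_vhca_state_in, in, size, buf->length);

            return mlx5_cmd_exec_inout(mvdev->mdev, load_vhca_state, in, out);
    }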
690 lockdep_assert_held(&migf->mvdev->state_mutex);
691 if (migf->mvdev->mdev_detach)
694 err = mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn);
700 lockdep_assert_held(&migf->mvdev->state_mutex);
701 if (migf->mvdev->mdev_detach)
704 mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn);
711 lockdep_assert_held(&migf->mvdev->state_mutex);
712 WARN_ON(migf->mvdev->mdev_detach);
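Lines 690-712 apply the same guards to the protection-domain lifetime: allocation fails with -ENOTCONN when detached, deallocation silently skips (a detached PF has already reclaimed its resources), and the broader migf resource cleanup (lines 711-712) asserts attachment like the buffer-free path above. Nearly verbatim from the matches:

    int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf)
    {
            lockdep_assert_held(&migf->mvdev->state_mutex);
            if (migf->mvdev->mdev_detach)
                    return -ENOTCONN;

            return mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn);
    }

    void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf)
    {
            lockdep_assert_held(&migf->mvdev->state_mutex);
            if (migf->mvdev->mdev_detach)
                    return;         /* PD is gone along with the PF */

            mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn);
    }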
736 struct mlx5vf_pci_core_device *mvdev,
741 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
774 MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id);
909 struct mlx5vf_pci_core_device *mvdev = container_of(
927 set_tracker_error(mvdev);
939 struct mlx5vf_pci_core_device *mvdev =
943 complete(&mvdev->tracker_comp);
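Lines 909-943 are the async notification side of the page tracker: a firmware error event reaches the tracker's notifier and fails it via set_tracker_error() (line 927), while a normal report completion on the tracker CQ just kicks tracker_comp; either way the sleeper at line 1605 wakes. Minimal shape of the completion handler (the handler name and the tracker.cq.mcq embedding are my assumptions about the surrounding structs):

    static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
                                   struct mlx5_eqe *eqe)
    {
            struct mlx5vf_pci_core_device *mvdev =
                    container_of(mcq, struct mlx5vf_pci_core_device,
                                 tracker.cq.mcq);

            /* a dirty-page report landed; wake the read-and-clear waiter */
            complete(&mvdev->tracker_comp);
    }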
1321 _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
1323 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
1324 struct mlx5_core_dev *mdev = mvdev->mdev;
1326 lockdep_assert_held(&mvdev->state_mutex);
1328 if (!mvdev->log_active)
1331 WARN_ON(mvdev->mdev_detach);
1341 mvdev->log_active = false;
1346 struct mlx5vf_pci_core_device *mvdev = container_of(
1349 mutex_lock(&mvdev->state_mutex);
1350 if (!mvdev->log_active)
1353 _mlx5vf_free_page_tracker_resources(mvdev);
1354 mvdev->log_active = false;
1356 mlx5vf_state_mutex_unlock(mvdev);
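Lines 1321-1356 are tracker teardown: _mlx5vf_free_page_tracker_resources() requires the PF attached (WARN_ON at 1331) and bails if logging never started, and the vfio stop callback wraps it in the state mutex before clearing log_active. Sketch of the stop callback (the vfio log_ops signature is assumed):

    static int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
    {
            struct mlx5vf_pci_core_device *mvdev = container_of(
                    vdev, struct mlx5vf_pci_core_device, core_device.vdev);

            mutex_lock(&mvdev->state_mutex);
            if (!mvdev->log_active)
                    goto end;       /* nothing to undo */

            _mlx5vf_free_page_tracker_resources(mvdev);
            mvdev->log_active = false;
    end:
            mlx5vf_state_mutex_unlock(mvdev);
            return 0;
    }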
1364 struct mlx5vf_pci_core_device *mvdev = container_of(
1366 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
1376 mutex_lock(&mvdev->state_mutex);
1377 if (mvdev->mdev_detach) {
1382 if (mvdev->log_active) {
1387 mdev = mvdev->mdev;
1443 err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
1450 mvdev->log_active = true;
1451 mlx5vf_state_mutex_unlock(mvdev);
1467 mlx5vf_state_mutex_unlock(mvdev);
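The start callback (lines 1364-1467) takes state_mutex, refuses both a detached PF and a second start while log_active is set, builds the report channel, and creates the firmware page-track object over the requested ranges via mlx5vf_create_tracker() (line 1443), which stamps the object with the VF's vhca_id (line 774). Entry-gate skeleton, with channel setup and error unwinding abbreviated:

    mutex_lock(&mvdev->state_mutex);
    if (mvdev->mdev_detach) {
            err = -ENOTCONN;
            goto end;
    }
    if (mvdev->log_active) {
            err = -EINVAL;          /* tracker already running */
            goto end;
    }
    mdev = mvdev->mdev;

    /* ... allocate the report channel (PD/CQ/QPs), then: */
    err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
    if (err)
            goto end;               /* real code unwinds the channel here */
    mvdev->log_active = true;
    end:
            mlx5vf_state_mutex_unlock(mvdev);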
1570 struct mlx5vf_pci_core_device *mvdev = container_of(
1572 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker;
1577 mutex_lock(&mvdev->state_mutex);
1578 if (!mvdev->log_active) {
1583 if (mvdev->mdev_detach) {
1588 mdev = mvdev->mdev;
1605 wait_for_completion(&mvdev->tracker_comp);
1622 mlx5vf_state_mutex_unlock(mvdev);
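read-and-clear (lines 1570-1622) is the consumer: under state_mutex it requires an active tracker and an attached PF, re-arms the firmware tracker for the requested IOVA range, then sleeps on tracker_comp until the CQ handler or set_tracker_error() completes it. Reconstructed waiting skeleton (the is_err check placement is my assumption; the reporting re-arm is elided):

    mutex_lock(&mvdev->state_mutex);
    if (!mvdev->log_active) {       /* start_page_tracker() never ran */
            err = -EINVAL;
            goto end;
    }
    if (mvdev->mdev_detach) {       /* PF went away underneath us */
            err = -ENOTCONN;
            goto end;
    }
    mdev = mvdev->mdev;

    /* ... switch the tracker object to reporting on the range, then: */
    wait_for_completion(&mvdev->tracker_comp);
    if (tracker->is_err)
            err = -EIO;             /* raised via set_tracker_error() */
    end:
            mlx5vf_state_mutex_unlock(mvdev);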