Lines matching defs:migf (every line below declares or uses a struct mlx5_vf_migration_file *migf; the number at the start of each line is its line number in the source file)
110 static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
112 mutex_lock(&migf->lock);
113 migf->state = MLX5_MIGF_STATE_ERROR;
114 migf->filp->f_pos = 0;
115 mutex_unlock(&migf->lock);
120 struct mlx5_vf_migration_file *migf = filp->private_data;
122 mlx5vf_disable_fd(migf);
123 mutex_destroy(&migf->lock);
124 kfree(migf);
129 mlx5vf_get_data_buff_from_pos(struct mlx5_vf_migration_file *migf, loff_t pos,
136 spin_lock_irq(&migf->list_lock);
137 if (list_empty(&migf->buf_list)) {
142 buf = list_first_entry(&migf->buf_list, struct mlx5_vhca_data_buffer,
154 migf->state = MLX5_MIGF_STATE_ERROR;
157 spin_unlock_irq(&migf->list_lock);
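Lines 129-157 are an ordered-chunk lookup: each queued buffer records its start_pos in the stream, and because data is produced and consumed strictly in order, the chunk covering the reader's position is always the head of buf_list; a position outside the head chunk means the stream has a hole, which line 154 treats as fatal. A minimal userspace sketch of the same lookup, with illustrative names (struct chunk and find_chunk are not from the driver):

#include <stddef.h>

struct chunk {
    struct chunk *next;
    long long start_pos;    /* stream offset of this chunk's first byte */
    long long length;       /* bytes held by this chunk */
};

/* Return the chunk covering pos, or NULL when the list is empty or pos
 * falls in a gap (the driver turns a gap into MLX5_MIGF_STATE_ERROR). */
static struct chunk *find_chunk(struct chunk *head, long long pos)
{
    if (!head)
        return NULL;
    if (pos < head->start_pos || pos >= head->start_pos + head->length)
        return NULL;
    return head;
}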
197 spin_lock_irq(&vhca_buf->migf->list_lock);
199 list_add_tail(&vhca_buf->buf_elm, &vhca_buf->migf->avail_list);
200 spin_unlock_irq(&vhca_buf->migf->list_lock);
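Lines 197-200 are the recycling half of that scheme: a fully consumed buffer is moved to avail_list under the same spinlock rather than freed, so a later save iteration can reuse the allocation. A hedged userspace analogue, with a pthread mutex standing in for the driver's spinlock and all names illustrative:

#include <pthread.h>

struct rbuf {
    struct rbuf *next;
};

struct rbuf_pool {
    struct rbuf *avail;         /* drained buffers awaiting reuse */
    pthread_mutex_t lock;
};

/* Park a drained buffer for reuse instead of freeing it. The driver
 * appends with list_add_tail(); for a pool a LIFO push is equivalent. */
static void rbuf_recycle(struct rbuf_pool *pool, struct rbuf *done)
{
    pthread_mutex_lock(&pool->lock);
    done->next = pool->avail;
    pool->avail = done;
    pthread_mutex_unlock(&pool->lock);
}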
209 struct mlx5_vf_migration_file *migf = filp->private_data;
220 if (wait_event_interruptible(migf->poll_wait,
221 !list_empty(&migf->buf_list) ||
222 migf->state == MLX5_MIGF_STATE_ERROR ||
223 migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR ||
224 migf->state == MLX5_MIGF_STATE_PRE_COPY ||
225 migf->state == MLX5_MIGF_STATE_COMPLETE))
229 mutex_lock(&migf->lock);
230 if (migf->state == MLX5_MIGF_STATE_ERROR) {
238 vhca_buf = mlx5vf_get_data_buff_from_pos(migf, *pos,
243 if (end_of_data && (migf->state == MLX5_MIGF_STATE_PRE_COPY ||
244 migf->state == MLX5_MIGF_STATE_PRE_COPY_ERROR)) {
249 if (end_of_data && migf->state != MLX5_MIGF_STATE_COMPLETE) {
274 mutex_unlock(&migf->lock);
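Lines 209-274 are the blocking read path: the reader sleeps until a buffer is queued or the state machine reaches PRE_COPY, COMPLETE, or an error state, and hitting end-of-data while still in PRE_COPY is reported as -ENOMSG rather than 0 (lines 243-244). From userspace that becomes the usual drain loop; data_fd below is assumed to be the saving fd returned by the migration-state ioctl:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Drain the saving fd into dest. read() returning 0 means the stream is
 * complete (MLX5_MIGF_STATE_COMPLETE); -ENOMSG marks the temporary end
 * of the currently available PRE_COPY data. */
static int drain_migration_fd(int data_fd, FILE *dest)
{
    char buf[65536];
    ssize_t n;

    for (;;) {
        n = read(data_fd, buf, sizeof(buf));
        if (n == 0)
            return 0;
        if (n < 0) {
            if (errno == EINTR)
                continue;
            return -errno;
        }
        if (fwrite(buf, 1, n, dest) != (size_t)n)
            return -EIO;
    }
}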
281 struct mlx5_vf_migration_file *migf = filp->private_data;
284 poll_wait(filp, &migf->poll_wait, wait);
286 mutex_lock(&migf->lock);
287 if (migf->state == MLX5_MIGF_STATE_ERROR)
289 else if (!list_empty(&migf->buf_list) ||
290 migf->state == MLX5_MIGF_STATE_COMPLETE)
292 mutex_unlock(&migf->lock);
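Lines 281-292 give the fd real poll() semantics: it becomes readable once buffered data exists or the save has completed, and an error state is flagged as well (EPOLLRDHUP in the driver, as I read it), so a nonblocking copier can multiplex the migration fd with its other work. A minimal consumer-side check (POLLRDHUP is Linux-specific, hence _GNU_SOURCE):

#define _GNU_SOURCE
#include <poll.h>

/* Returns 1 when data is readable, 0 on timeout, -1 on poll failure or
 * when the device signalled an error via POLLRDHUP. */
static int wait_readable(int data_fd, int timeout_ms)
{
    struct pollfd pfd = { .fd = data_fd, .events = POLLIN | POLLRDHUP };
    int ret = poll(&pfd, 1, timeout_ms);

    if (ret <= 0)
        return ret < 0 ? -1 : 0;
    return (pfd.revents & POLLRDHUP) ? -1 : 1;
}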
299 * Mark migf in error, and wake the user.
301 static void mlx5vf_mark_err(struct mlx5_vf_migration_file *migf)
303 migf->state = MLX5_MIGF_STATE_ERROR;
304 wake_up_interruptible(&migf->poll_wait);
307 static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf)
319 header_buf = mlx5vf_get_data_buffer(migf, size, DMA_NONE);
334 data.stop_copy_size = cpu_to_le64(migf->buf->allocated_length);
338 header_buf->start_pos = header_buf->migf->max_pos;
339 migf->max_pos += header_buf->length;
340 spin_lock_irqsave(&migf->list_lock, flags);
341 list_add_tail(&header_buf->buf_elm, &migf->buf_list);
342 spin_unlock_irqrestore(&migf->list_lock, flags);
343 migf->pre_copy_initial_bytes = size;
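Lines 307-343 queue a metadata record ahead of the device image: it advertises the expected stop-copy size (taken from migf->buf->allocated_length at line 334) so userspace can budget downtime, and its size becomes pre_copy_initial_bytes at line 343. The framing this implies, parsed back by mlx5vf_resume_read_header below, is a little-endian header followed by record_size payload bytes; the struct here mirrors the driver's struct mlx5_vf_migration_header as I recall it, so treat the exact layout and tag values as assumptions to verify against the driver's cmd.h:

#include <stdint.h>

/* On-stream record framing (little-endian), followed by record_size
 * payload bytes. Tags distinguish device-image data from metadata
 * records such as the stop-copy-size record built above. */
struct migration_record_hdr {
    uint64_t record_size;   /* payload length in bytes */
    uint32_t flags;         /* mandatory vs optional record */
    uint32_t tag;           /* record type */
};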
350 static int mlx5vf_prep_stop_copy(struct mlx5_vf_migration_file *migf,
361 buf = mlx5vf_get_data_buffer(migf, inc_state_size, DMA_FROM_DEVICE);
365 migf->buf = buf;
366 buf = mlx5vf_get_data_buffer(migf,
373 migf->buf_header = buf;
374 ret = mlx5vf_add_stop_copy_header(migf);
380 mlx5vf_put_data_buffer(migf->buf_header);
381 migf->buf_header = NULL;
383 mlx5vf_put_data_buffer(migf->buf);
384 migf->buf = NULL;
391 struct mlx5_vf_migration_file *migf = filp->private_data;
392 struct mlx5vf_pci_core_device *mvdev = migf->mvdev;
436 mutex_lock(&migf->lock);
437 if (migf->state == MLX5_MIGF_STATE_ERROR) {
442 if (migf->pre_copy_initial_bytes > *pos) {
443 info.initial_bytes = migf->pre_copy_initial_bytes - *pos;
445 info.dirty_bytes = migf->max_pos - *pos;
452 mutex_unlock(&migf->lock);
456 mutex_unlock(&migf->lock);
461 buf = mlx5vf_get_data_buffer(migf, inc_length, DMA_FROM_DEVICE);
464 mlx5vf_mark_err(migf);
468 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, true);
470 mlx5vf_mark_err(migf);
482 mutex_unlock(&migf->lock);
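Lines 391-482 implement VFIO_MIG_GET_PRECOPY_INFO: initial_bytes reports what remains of the mandatory prefix (pre_copy_initial_bytes) relative to the reader's position, dirty_bytes covers the rest of what the device has produced so far, and the handler also kicks off saving another increment (line 468). The matching userspace query, using the documented uAPI:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_precopy(int data_fd)
{
    struct vfio_precopy_info info = { .argsz = sizeof(info) };

    if (ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
        return -1;
    printf("initial_bytes=%llu dirty_bytes=%llu\n",
           (unsigned long long)info.initial_bytes,
           (unsigned long long)info.dirty_bytes);
    return 0;
}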
500 struct mlx5_vf_migration_file *migf = mvdev->saving_migf;
505 if (migf->state == MLX5_MIGF_STATE_ERROR)
514 if (migf->buf && migf->buf->allocated_length >= length) {
515 buf = migf->buf;
516 migf->buf = NULL;
518 buf = mlx5vf_get_data_buffer(migf, length, DMA_FROM_DEVICE);
525 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, true, false);
534 mlx5vf_mark_err(migf);
541 struct mlx5_vf_migration_file *migf;
546 migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
547 if (!migf)
550 migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_save_fops, migf,
552 if (IS_ERR(migf->filp)) {
553 ret = PTR_ERR(migf->filp);
557 migf->mvdev = mvdev;
558 ret = mlx5vf_cmd_alloc_pd(migf);
562 stream_open(migf->filp->f_inode, migf->filp);
563 mutex_init(&migf->lock);
564 init_waitqueue_head(&migf->poll_wait);
565 init_completion(&migf->save_comp);
571 complete(&migf->save_comp);
572 mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx);
573 INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb);
574 INIT_LIST_HEAD(&migf->buf_list);
575 INIT_LIST_HEAD(&migf->avail_list);
576 spin_lock_init(&migf->list_lock);
582 ret = mlx5vf_prep_stop_copy(migf, length);
587 buf = mlx5vf_alloc_data_buffer(migf, length, DMA_FROM_DEVICE);
593 ret = mlx5vf_cmd_save_vhca_state(mvdev, migf, buf, false, track);
596 return migf;
600 mlx5fv_cmd_clean_migf_resources(migf);
602 fput(migf->filp);
604 kfree(migf);
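Lines 541-604 build everything the saving fd needs (anon inode, PD, async command context, buffer lists) before the first save command is issued; all userspace ever sees of this is a single file descriptor handed back by the migration-state feature ioctl. A sketch of obtaining it, built on the documented VFIO uAPI (the helper name set_mig_state is mine):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Move the device to new_state; for arcs that produce a data stream
 * (STOP_COPY, PRE_COPY, RESUMING) the returned descriptor is the
 * migf->filp installed above. */
static int set_mig_state(int device_fd, uint32_t new_state, int *data_fd)
{
    char buf[sizeof(struct vfio_device_feature) +
             sizeof(struct vfio_device_feature_mig_state)];
    struct vfio_device_feature *feat = (void *)buf;
    struct vfio_device_feature_mig_state *mig = (void *)feat->data;

    memset(buf, 0, sizeof(buf));
    feat->argsz = sizeof(buf);
    feat->flags = VFIO_DEVICE_FEATURE_SET |
                  VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
    mig->device_state = new_state;
    if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feat))
        return -1;
    if (data_fd)
        *data_fd = mig->data_fd;  /* -1 when the arc creates no stream */
    return 0;
}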
672 mlx5vf_resume_read_image(struct mlx5_vf_migration_file *migf,
692 migf->load_state = MLX5_VF_LOAD_STATE_LOAD_IMAGE;
693 migf->max_pos += image_size;
701 mlx5vf_resume_read_header_data(struct mlx5_vf_migration_file *migf,
711 required_data = migf->record_size - vhca_buf->length;
722 if (vhca_buf->length == migf->record_size) {
723 switch (migf->record_tag) {
732 migf->stop_copy_prep_size = min_t(u64,
742 migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
743 migf->max_pos += migf->record_size;
751 mlx5vf_resume_read_header(struct mlx5_vf_migration_file *migf,
789 migf->record_size = record_size;
792 migf->record_tag = le32_to_cpup((__le32 *)(to_buff +
794 switch (migf->record_tag) {
796 migf->load_state = MLX5_VF_LOAD_STATE_PREP_IMAGE;
799 migf->load_state = MLX5_VF_LOAD_STATE_PREP_HEADER_DATA;
807 migf->load_state = MLX5_VF_LOAD_STATE_PREP_HEADER_DATA;
810 migf->max_pos += vhca_buf->length;
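Lines 672-810 are the load-side parser: each record advances load_state, the header or image buffer is regrown when a record exceeds what was preallocated, and the stop-copy-size record caps stop_copy_prep_size (line 732). Compressed into one annotated enum (names mirror the driver's MLX5_VF_LOAD_STATE_* values; the flow shown is a summary, not the exact kernel switch):

/* One full record drives the machine through one of two loops:
 *   READ_HEADER -> PREP_HEADER_DATA -> READ_HEADER_DATA -> READ_HEADER
 *   READ_HEADER -> PREP_IMAGE -> READ_IMAGE -> LOAD_IMAGE -> READ_HEADER
 */
enum load_state {
    READ_HEADER,        /* parse the fixed record header */
    PREP_HEADER_DATA,   /* grow the metadata buffer if needed */
    READ_HEADER_DATA,   /* consume record_size metadata bytes */
    PREP_IMAGE,         /* grow the image buffer if needed */
    READ_IMAGE,         /* consume record_size image bytes */
    LOAD_IMAGE,         /* issue the load to the device, then loop */
};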
822 struct mlx5_vf_migration_file *migf = filp->private_data;
823 struct mlx5_vhca_data_buffer *vhca_buf = migf->buf;
824 struct mlx5_vhca_data_buffer *vhca_buf_header = migf->buf_header;
838 mutex_lock(&migf->mvdev->state_mutex);
839 mutex_lock(&migf->lock);
840 if (migf->state == MLX5_MIGF_STATE_ERROR) {
847 switch (migf->load_state) {
849 ret = mlx5vf_resume_read_header(migf, vhca_buf_header,
856 if (vhca_buf_header->allocated_length < migf->record_size) {
859 migf->buf_header = mlx5vf_alloc_data_buffer(migf,
860 migf->record_size, DMA_NONE);
861 if (IS_ERR(migf->buf_header)) {
862 ret = PTR_ERR(migf->buf_header);
863 migf->buf_header = NULL;
867 vhca_buf_header = migf->buf_header;
870 vhca_buf_header->start_pos = migf->max_pos;
871 migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER_DATA;
874 ret = mlx5vf_resume_read_header_data(migf, vhca_buf_header,
881 u64 size = max(migf->record_size,
882 migf->stop_copy_prep_size);
887 migf->buf = mlx5vf_alloc_data_buffer(migf,
889 if (IS_ERR(migf->buf)) {
890 ret = PTR_ERR(migf->buf);
891 migf->buf = NULL;
895 vhca_buf = migf->buf;
898 vhca_buf->start_pos = migf->max_pos;
899 migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE;
910 ret = mlx5vf_resume_read_image(migf, vhca_buf,
911 migf->record_size,
917 ret = mlx5vf_cmd_load_vhca_state(migf->mvdev, migf, vhca_buf);
920 migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
935 migf->state = MLX5_MIGF_STATE_ERROR;
936 mutex_unlock(&migf->lock);
937 mlx5vf_state_mutex_unlock(migf->mvdev);
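Lines 822-937 accept the stream under both the device state_mutex and the file lock, dispatching on load_state as sketched above. The sending side needs only an ordinary short-write-safe loop; data_fd here is the resuming fd:

#include <errno.h>
#include <unistd.h>

/* Feed captured migration bytes into the resuming fd, tolerating short
 * writes; the driver consumes the stream record by record. */
static int feed_migration_fd(int data_fd, const char *buf, size_t len)
{
    while (len) {
        ssize_t n = write(data_fd, buf, len);

        if (n < 0) {
            if (errno == EINTR)
                continue;
            return -errno;  /* file went to MLX5_MIGF_STATE_ERROR */
        }
        buf += n;
        len -= (size_t)n;
    }
    return 0;
}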
951 struct mlx5_vf_migration_file *migf;
955 migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
956 if (!migf)
959 migf->filp = anon_inode_getfile("mlx5vf_mig", &mlx5vf_resume_fops, migf,
961 if (IS_ERR(migf->filp)) {
962 ret = PTR_ERR(migf->filp);
966 migf->mvdev = mvdev;
967 ret = mlx5vf_cmd_alloc_pd(migf);
971 buf = mlx5vf_alloc_data_buffer(migf, 0, DMA_TO_DEVICE);
977 migf->buf = buf;
979 buf = mlx5vf_alloc_data_buffer(migf,
986 migf->buf_header = buf;
987 migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER;
990 migf->load_state = MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER;
993 stream_open(migf->filp->f_inode, migf->filp);
994 mutex_init(&migf->lock);
995 INIT_LIST_HEAD(&migf->buf_list);
996 INIT_LIST_HEAD(&migf->avail_list);
997 spin_lock_init(&migf->list_lock);
998 return migf;
1000 mlx5vf_free_data_buffer(migf->buf);
1002 mlx5vf_cmd_dealloc_pd(migf);
1004 fput(migf->filp);
1006 kfree(migf);
1070 struct mlx5_vf_migration_file *migf;
1072 migf = mlx5vf_pci_save_device_data(mvdev, false);
1073 if (IS_ERR(migf))
1074 return ERR_CAST(migf);
1075 get_file(migf->filp);
1076 mvdev->saving_migf = migf;
1077 return migf->filp;
1089 struct mlx5_vf_migration_file *migf;
1091 migf = mlx5vf_pci_resume_device_data(mvdev);
1092 if (IS_ERR(migf))
1093 return ERR_CAST(migf);
1094 get_file(migf->filp);
1095 mvdev->resuming_migf = migf;
1096 return migf->filp;
1114 struct mlx5_vf_migration_file *migf;
1116 migf = mlx5vf_pci_save_device_data(mvdev, true);
1117 if (IS_ERR(migf))
1118 return ERR_CAST(migf);
1119 get_file(migf->filp);
1120 mvdev->saving_migf = migf;
1121 return migf->filp;
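Lines 1114-1121 are the PRE_COPY arc: the same save path as STOP_COPY at lines 1070-1077, entered with track=true while the VM still runs. Tying the earlier sketches together; set_mig_state, query_precopy, and drain_migration_fd are this document's illustrations, not uAPI, and the orchestration below is hypothetical:

#include <stddef.h>
#include <stdio.h>

static int migrate_with_precopy(int device_fd, FILE *dest)
{
    int data_fd;

    if (set_mig_state(device_fd, VFIO_DEVICE_STATE_PRE_COPY, &data_fd))
        return -1;
    /* While the VM runs: alternate query_precopy() and read() until
     * initial_bytes + dirty_bytes is small enough to stop. */
    if (query_precopy(data_fd))
        return -1;
    /* Freeze the VM, then collect the final state on the same fd. */
    if (set_mig_state(device_fd, VFIO_DEVICE_STATE_STOP_COPY, NULL))
        return -1;
    return drain_migration_fd(data_fd, dest);
}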