Lines matching refs:vm (drivers/virtio/virtio_mem.c)
168 static int register_virtio_mem_device(struct virtio_mem *vm)
177 list_add_rcu(&vm->next, &virtio_mem_devices);
187 static void unregister_virtio_mem_device(struct virtio_mem *vm)
191 list_del_rcu(&vm->next);
218 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
224 return (addr - mb_addr) / vm->subblock_size;
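
The calculation at line 224 locates a physical address within its memory block: subtract the block's start address, then divide by the subblock size. A minimal userspace sketch of the same arithmetic, with illustrative sizes (the driver derives both values at init time, see lines 1689-1693):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative sizes; the driver computes these in virtio_mem_init(). */
    #define MEMORY_BLOCK_SIZE   (128ull << 20)  /* 128 MiB Linux memory block */
    #define SUBBLOCK_SIZE       (4ull << 20)    /* 4 MiB device subblock */

    /* One id per aligned Linux memory block. */
    static unsigned long phys_to_mb_id(uint64_t addr)
    {
        return addr / MEMORY_BLOCK_SIZE;
    }

    /* Subblock index of addr within its memory block (cf. line 224). */
    static unsigned long phys_to_sb_id(uint64_t addr)
    {
        const uint64_t mb_addr = phys_to_mb_id(addr) * MEMORY_BLOCK_SIZE;

        return (addr - mb_addr) / SUBBLOCK_SIZE;
    }

    int main(void)
    {
        const uint64_t addr = 0x140000000ull + 5 * SUBBLOCK_SIZE;

        printf("mb_id=%lu sb_id=%lu\n",
               phys_to_mb_id(addr), phys_to_sb_id(addr));  /* sb_id=5 */
        return 0;
    }
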
230 static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
233 const unsigned long idx = mb_id - vm->first_mb_id;
236 old_state = vm->mb_state[idx];
237 vm->mb_state[idx] = state;
239 BUG_ON(vm->nb_mb_state[old_state] == 0);
240 vm->nb_mb_state[old_state]--;
241 vm->nb_mb_state[state]++;
247 static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
250 const unsigned long idx = mb_id - vm->first_mb_id;
252 return vm->mb_state[idx];
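
Lines 230-252 keep one byte of state per memory block, indexed by mb_id - first_mb_id, plus one counter per state so questions like "how many blocks are offline?" are O(1). A compact model of that bookkeeping, with the BUG_ON of line 239 rendered as an assert and invented state names standing in for the driver's enum:

    #include <assert.h>
    #include <stdio.h>

    enum mb_state { MB_UNUSED, MB_PLUGGED, MB_OFFLINE, MB_ONLINE, MB_COUNT };

    #define FIRST_MB_ID 100
    #define NB_MB       8

    static unsigned char mb_state[NB_MB];           /* one byte per block */
    static unsigned long nb_mb_state[MB_COUNT] = {  /* per-state counters */
        [MB_UNUSED] = NB_MB,
    };

    static void mb_set_state(unsigned long mb_id, enum mb_state state)
    {
        const unsigned long idx = mb_id - FIRST_MB_ID;
        const enum mb_state old_state = mb_state[idx];

        mb_state[idx] = state;
        assert(nb_mb_state[old_state] > 0);  /* mirrors the BUG_ON */
        nb_mb_state[old_state]--;
        nb_mb_state[state]++;
    }

    int main(void)
    {
        mb_set_state(FIRST_MB_ID + 3, MB_PLUGGED);
        mb_set_state(FIRST_MB_ID + 3, MB_ONLINE);
        printf("unused=%lu online=%lu\n",
               nb_mb_state[MB_UNUSED], nb_mb_state[MB_ONLINE]);  /* 7 1 */
        return 0;
    }
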
258 static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
260 unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
261 unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
266 if (vm->mb_state && old_pages == new_pages)
273 mutex_lock(&vm->hotplug_mutex);
274 if (vm->mb_state)
275 memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
276 vfree(vm->mb_state);
277 vm->mb_state = new_mb_state;
278 mutex_unlock(&vm->hotplug_mutex);
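
Lines 258-278 grow that state array by one entry at a time, but only reallocate when the byte count crosses a page boundary; the copy and pointer swap happen under hotplug_mutex so concurrent readers never see a half-initialized array. Roughly, in userspace terms (malloc/free standing in for vmalloc/vfree, locking elided):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE  4096ul
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

    static uint8_t *mb_state;
    static unsigned long next_idx;  /* entries handed out so far */

    /* Make room for one more one-byte state; reallocate only per page. */
    static int mb_state_prepare_next(void)
    {
        const unsigned long old_bytes = next_idx;
        const unsigned long new_bytes = next_idx + 1;
        uint8_t *new_state;

        if (mb_state && PFN_UP(old_bytes) == PFN_UP(new_bytes))
            return 0;  /* still fits into the pages already allocated */

        new_state = calloc(PFN_UP(new_bytes), PAGE_SIZE);
        if (!new_state)
            return -1;

        /* The driver does this copy/swap under vm->hotplug_mutex. */
        if (mb_state)
            memcpy(new_state, mb_state, PFN_UP(old_bytes) * PAGE_SIZE);
        free(mb_state);
        mb_state = new_state;
        return 0;
    }

    int main(void)
    {
        while (next_idx < PAGE_SIZE + 1) {  /* forces one reallocation */
            if (mb_state_prepare_next())
                return 1;
            mb_state[next_idx++] = 0;  /* new entry starts as UNUSED */
        }
        free(mb_state);
        return 0;
    }
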
300 static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
304 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
306 __bitmap_set(vm->sb_bitmap, bit, count);
314 static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
318 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
320 __bitmap_clear(vm->sb_bitmap, bit, count);
326 static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
330 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
333 return test_bit(bit, vm->sb_bitmap);
336 return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
343 static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
347 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
350 return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
354 * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
357 static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
360 const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;
362 return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
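
The subblock plug state lives in one long bitmap with nb_sb_per_mb bits per memory block; lines 300-362 set, clear and test bit ranges with the kernel's bitmap helpers, and "first unplugged subblock" is just find_next_zero_bit() over the block's range. A self-contained model with a linear stand-in for find_next_zero_bit():

    #include <limits.h>
    #include <stdio.h>

    #define NB_SB_PER_MB   32
    #define BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long sb_bitmap[4];  /* 1 bit per subblock, packed */

    static void set_bit_(unsigned long bit)
    {
        sb_bitmap[bit / BITS_PER_LONG] |= 1ul << (bit % BITS_PER_LONG);
    }

    static int test_bit_(unsigned long bit)
    {
        return !!(sb_bitmap[bit / BITS_PER_LONG] &
                  (1ul << (bit % BITS_PER_LONG)));
    }

    /* Linear stand-in for the kernel's find_next_zero_bit(). */
    static unsigned long find_next_zero_bit_(unsigned long size,
                                             unsigned long offset)
    {
        while (offset < size && test_bit_(offset))
            offset++;
        return offset;  /* == size if every bit in the range is set */
    }

    /* First unplugged subblock of a block; NB_SB_PER_MB if fully plugged. */
    static int first_unplugged_sb(unsigned long mb_idx)
    {
        const unsigned long bit = mb_idx * NB_SB_PER_MB;

        return find_next_zero_bit_(bit + NB_SB_PER_MB, bit) - bit;
    }

    int main(void)
    {
        set_bit_(0);  /* plug sb 0 of block 0 */
        set_bit_(1);  /* plug sb 1 of block 0 */
        printf("first unplugged: %d\n", first_unplugged_sb(0));  /* 2 */
        return 0;
    }
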
369 static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
371 const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
372 const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
373 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
378 if (vm->sb_bitmap && old_pages == new_pages)
385 mutex_lock(&vm->hotplug_mutex);
387 memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);
389 old_sb_bitmap = vm->sb_bitmap;
390 vm->sb_bitmap = new_sb_bitmap;
391 mutex_unlock(&vm->hotplug_mutex);
401 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
406 static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
409 int nid = vm->nid;
418 if (!vm->resource_name) {
419 vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
421 if (!vm->resource_name)
425 dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
427 vm->resource_name,
435 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
440 static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
443 int nid = vm->nid;
448 dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
455 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
460 static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
464 int nid = vm->nid;
469 dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
477 static void virtio_mem_retry(struct virtio_mem *vm)
481 spin_lock_irqsave(&vm->removal_lock, flags);
482 if (!vm->removing)
483 queue_work(system_freezable_wq, &vm->wq);
484 spin_unlock_irqrestore(&vm->removal_lock, flags);
487 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
492 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
502 static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
505 unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
506 unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
516 static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
518 return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
521 static int virtio_mem_notify_going_online(struct virtio_mem *vm,
524 switch (virtio_mem_mb_get_state(vm, mb_id)) {
531 dev_warn_ratelimited(&vm->vdev->dev,
536 static void virtio_mem_notify_offline(struct virtio_mem *vm,
539 switch (virtio_mem_mb_get_state(vm, mb_id)) {
541 virtio_mem_mb_set_state(vm, mb_id,
545 virtio_mem_mb_set_state(vm, mb_id,
560 virtio_mem_retry(vm);
563 static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
567 switch (virtio_mem_mb_get_state(vm, mb_id)) {
569 virtio_mem_mb_set_state(vm, mb_id,
573 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
579 nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
580 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
584 virtio_mem_retry(vm);
587 static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
590 const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
595 for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
596 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
605 sb_id * vm->subblock_size);
615 static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
618 const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
622 for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
623 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
631 sb_id * vm->subblock_size);
646 struct virtio_mem *vm = container_of(nb, struct virtio_mem,
654 if (!virtio_mem_overlaps_range(vm, start, size))
676 mutex_lock(&vm->hotplug_mutex);
677 if (vm->removing) {
679 mutex_unlock(&vm->hotplug_mutex);
682 vm->hotplug_active = true;
683 virtio_mem_notify_going_offline(vm, mb_id);
686 mutex_lock(&vm->hotplug_mutex);
687 if (vm->removing) {
689 mutex_unlock(&vm->hotplug_mutex);
692 vm->hotplug_active = true;
693 rc = virtio_mem_notify_going_online(vm, mb_id);
696 virtio_mem_notify_offline(vm, mb_id);
697 vm->hotplug_active = false;
698 mutex_unlock(&vm->hotplug_mutex);
701 virtio_mem_notify_online(vm, mb_id);
702 vm->hotplug_active = false;
703 mutex_unlock(&vm->hotplug_mutex);
706 if (!vm->hotplug_active)
708 virtio_mem_notify_cancel_offline(vm, mb_id);
709 vm->hotplug_active = false;
710 mutex_unlock(&vm->hotplug_mutex);
713 if (!vm->hotplug_active)
715 vm->hotplug_active = false;
716 mutex_unlock(&vm->hotplug_mutex);
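
The notifier cases above (lines 676-716) follow one bracketing discipline: hotplug_mutex is taken at MEM_GOING_ONLINE/MEM_GOING_OFFLINE and released only by the matching terminal event (ONLINE, OFFLINE or a CANCEL), with hotplug_active recording whether this device opened the bracket. A condensed userspace model of that flow (pthread mutex in place of the kernel mutex; the real handler also rejects events while vm->removing is set, as modeled here):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum mem_action {
        MEM_GOING_ONLINE, MEM_ONLINE, MEM_CANCEL_ONLINE,
        MEM_GOING_OFFLINE, MEM_OFFLINE, MEM_CANCEL_OFFLINE,
    };

    #define NOTIFY_OK   0   /* simplified return codes, not kernel values */
    #define NOTIFY_BAD  (-1)

    static pthread_mutex_t hotplug_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool hotplug_active;
    static bool removing;

    static int notifier_cb(enum mem_action action)
    {
        switch (action) {
        case MEM_GOING_OFFLINE:
        case MEM_GOING_ONLINE:
            pthread_mutex_lock(&hotplug_mutex);
            if (removing) {
                pthread_mutex_unlock(&hotplug_mutex);
                return NOTIFY_BAD;
            }
            hotplug_active = true;  /* bracket opened by us */
            break;
        case MEM_OFFLINE:
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
        case MEM_CANCEL_ONLINE:
            if (!hotplug_active)
                break;  /* we never opened a bracket for this event */
            hotplug_active = false;
            pthread_mutex_unlock(&hotplug_mutex);
            break;
        }
        return NOTIFY_OK;
    }

    int main(void)
    {
        notifier_cb(MEM_GOING_ONLINE);
        notifier_cb(MEM_ONLINE);
        printf("active=%d\n", hotplug_active);  /* 0 */
        return 0;
    }
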
801 struct virtio_mem *vm;
811 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
812 if (!virtio_mem_owned_mb(vm, mb_id))
815 sb_id = virtio_mem_phys_to_sb_id(vm, addr);
820 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
834 static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
842 vm->req = *req;
845 sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
849 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
852 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
856 virtqueue_kick(vm->vq);
859 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
861 return virtio16_to_cpu(vm->vdev, vm->resp.type);
864 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
867 const uint64_t nb_vm_blocks = size / vm->device_block_size;
869 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
870 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
871 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
874 if (atomic_read(&vm->config_changed))
877 switch (virtio_mem_send_request(vm, &req)) {
879 vm->plugged_size += size;
892 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
895 const uint64_t nb_vm_blocks = size / vm->device_block_size;
897 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
898 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
899 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
902 if (atomic_read(&vm->config_changed))
905 switch (virtio_mem_send_request(vm, &req)) {
907 vm->plugged_size -= size;
918 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
921 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
924 switch (virtio_mem_send_request(vm, &req)) {
926 vm->unplug_all_required = false;
927 vm->plugged_size = 0;
929 atomic_set(&vm->config_changed, 1);
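
Every request (plug, unplug, unplug-all) is answered with a single response code, which the send helpers above fold into an errno before callers see it; an ACK also updates the cached plugged_size. A sketch of that mapping for the plug path; the response names match the virtio-mem spec, but treat the specific errno choices here as illustrative:

    #include <errno.h>

    enum virtio_mem_resp {
        VIRTIO_MEM_RESP_ACK,    /* request carried out */
        VIRTIO_MEM_RESP_NACK,   /* request denied by the device */
        VIRTIO_MEM_RESP_BUSY,   /* device busy, retry later */
        VIRTIO_MEM_RESP_ERROR,  /* malformed request */
    };

    /* Fold a device response into an errno; track plugged_size on ACK. */
    static int handle_plug_response(enum virtio_mem_resp resp,
                                    unsigned long long *plugged_size,
                                    unsigned long long size)
    {
        switch (resp) {
        case VIRTIO_MEM_RESP_ACK:
            *plugged_size += size;
            return 0;
        case VIRTIO_MEM_RESP_NACK:
            return -EAGAIN;
        case VIRTIO_MEM_RESP_BUSY:
            return -ETXTBSY;
        case VIRTIO_MEM_RESP_ERROR:
            return -EINVAL;
        default:
            return -EIO;
        }
    }

    int main(void)
    {
        unsigned long long plugged = 0;

        return handle_plug_response(VIRTIO_MEM_RESP_ACK, &plugged, 1 << 20);
    }
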
942 static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
946 sb_id * vm->subblock_size;
947 const uint64_t size = count * vm->subblock_size;
950 dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
953 rc = virtio_mem_send_plug_request(vm, addr, size);
955 virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
963 static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
967 sb_id * vm->subblock_size;
968 const uint64_t size = count * vm->subblock_size;
971 dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
974 rc = virtio_mem_send_unplug_request(vm, addr, size);
976 virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
989 static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
995 sb_id = vm->nb_sb_per_mb - 1;
999 virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
1006 virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
1011 rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
1028 static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
1030 uint64_t nb_sb = vm->nb_sb_per_mb;
1032 return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
1038 static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
1043 if (vm->next_mb_id > vm->last_usable_mb_id)
1047 rc = virtio_mem_mb_state_prepare_next_mb(vm);
1052 rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
1056 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
1057 *mb_id = vm->next_mb_id++;
1064 static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
1068 nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
1069 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
1079 static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
1083 const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
1093 rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
1101 if (count == vm->nb_sb_per_mb)
1102 virtio_mem_mb_set_state(vm, mb_id,
1105 virtio_mem_mb_set_state(vm, mb_id,
1109 rc = virtio_mem_mb_add(vm, mb_id);
1113 dev_err(&vm->vdev->dev,
1115 rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);
1123 virtio_mem_mb_set_state(vm, mb_id, new_state);
1139 static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
1150 sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
1151 if (sb_id >= vm->nb_sb_per_mb)
1155 sb_id + count < vm->nb_sb_per_mb &&
1156 !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
1160 rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
1169 sb_id * vm->subblock_size);
1170 nr_pages = PFN_DOWN(count * vm->subblock_size);
1174 if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1176 virtio_mem_mb_set_state(vm, mb_id,
1179 virtio_mem_mb_set_state(vm, mb_id,
1189 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1191 uint64_t nb_sb = diff / vm->subblock_size;
1199 mutex_lock(&vm->hotplug_mutex);
1202 virtio_mem_for_each_mb_state(vm, mb_id,
1204 rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
1211 virtio_mem_for_each_mb_state(vm, mb_id,
1213 rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
1223 mutex_unlock(&vm->hotplug_mutex);
1226 virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
1227 if (virtio_mem_too_many_mb_offline(vm))
1230 rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
1238 if (virtio_mem_too_many_mb_offline(vm))
1241 rc = virtio_mem_prepare_next_mb(vm, &mb_id);
1244 rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
1252 mutex_unlock(&vm->hotplug_mutex);
1265 static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
1271 rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
1274 if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
1275 virtio_mem_mb_set_state(vm, mb_id,
1280 if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1287 virtio_mem_mb_set_state(vm, mb_id,
1290 mutex_unlock(&vm->hotplug_mutex);
1291 rc = virtio_mem_mb_remove(vm, mb_id);
1293 mutex_lock(&vm->hotplug_mutex);
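
Note the unlock/relock around virtio_mem_mb_remove() at lines 1290-1293 (and the same dance around virtio_mem_mb_offline_and_remove() at lines 1390-1392, further below): removing a memory block can fire memory notifiers that take hotplug_mutex themselves, which is exactly the deadlock the comments at lines 435/455 warn about. The shape of that pattern:

    #include <pthread.h>

    static pthread_mutex_t hotplug_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* May trigger notifiers that themselves take hotplug_mutex, so it
     * must run with the mutex dropped. */
    static int mb_remove(unsigned long mb_id)
    {
        (void)mb_id;
        return 0;
    }

    static int unplug_and_maybe_remove(unsigned long mb_id, int fully_unplugged)
    {
        int rc = 0;

        pthread_mutex_lock(&hotplug_mutex);
        /* ... unplug subblocks, update state, all under the mutex ... */
        if (fully_unplugged) {
            pthread_mutex_unlock(&hotplug_mutex);
            rc = mb_remove(mb_id);  /* runs without the mutex */
            pthread_mutex_lock(&hotplug_mutex);
        }
        pthread_mutex_unlock(&hotplug_mutex);
        return rc;
    }

    int main(void)
    {
        return unplug_and_maybe_remove(0, 1);
    }
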
1303 static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
1307 const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
1312 sb_id * vm->subblock_size);
1326 rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
1333 virtio_mem_mb_set_state(vm, mb_id,
1348 static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
1355 if (*nb_sb >= vm->nb_sb_per_mb &&
1356 virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1357 rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
1358 vm->nb_sb_per_mb);
1360 *nb_sb -= vm->nb_sb_per_mb;
1367 for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
1370 !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
1375 rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
1389 if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1390 mutex_unlock(&vm->hotplug_mutex);
1391 rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
1392 mutex_lock(&vm->hotplug_mutex);
1394 virtio_mem_mb_set_state(vm, mb_id,
1404 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
1406 uint64_t nb_sb = diff / vm->subblock_size;
1418 mutex_lock(&vm->hotplug_mutex);
1421 virtio_mem_for_each_mb_state_rev(vm, mb_id,
1423 rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
1431 virtio_mem_for_each_mb_state_rev(vm, mb_id,
1433 rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
1441 mutex_unlock(&vm->hotplug_mutex);
1446 virtio_mem_for_each_mb_state_rev(vm, mb_id,
1448 rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
1452 mutex_unlock(&vm->hotplug_mutex);
1454 mutex_lock(&vm->hotplug_mutex);
1458 virtio_mem_for_each_mb_state_rev(vm, mb_id,
1460 rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
1464 mutex_unlock(&vm->hotplug_mutex);
1466 mutex_lock(&vm->hotplug_mutex);
1469 mutex_unlock(&vm->hotplug_mutex);
1472 mutex_unlock(&vm->hotplug_mutex);
1480 static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
1485 virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
1486 rc = virtio_mem_mb_unplug(vm, mb_id);
1489 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
1498 static void virtio_mem_refresh_config(struct virtio_mem *vm)
1504 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
1506 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
1507 vm->plugged_size = new_plugged_size;
1510 virtio_cread_le(vm->vdev, struct virtio_mem_config,
1512 end_addr = vm->addr + usable_region_size;
1514 vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
1517 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
1518 &vm->requested_size);
1520 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
1521 dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
1529 struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
1533 hrtimer_cancel(&vm->retry_timer);
1535 if (vm->broken)
1542 if (unlikely(vm->unplug_all_required))
1543 rc = virtio_mem_send_unplug_all_request(vm);
1545 if (atomic_read(&vm->config_changed)) {
1546 atomic_set(&vm->config_changed, 0);
1547 virtio_mem_refresh_config(vm);
1552 rc = virtio_mem_unplug_pending_mb(vm);
1554 if (!rc && vm->requested_size != vm->plugged_size) {
1555 if (vm->requested_size > vm->plugged_size) {
1556 diff = vm->requested_size - vm->plugged_size;
1557 rc = virtio_mem_plug_request(vm, diff);
1559 diff = vm->plugged_size - vm->requested_size;
1560 rc = virtio_mem_unplug_request(vm, diff);
1566 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
1586 hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
1594 dev_err(&vm->vdev->dev,
1596 vm->broken = true;
1602 struct virtio_mem *vm = container_of(timer, struct virtio_mem,
1605 virtio_mem_retry(vm);
1606 vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
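
The retry timer (lines 1586-1606) implements exponential backoff: each expiration requeues the worker and doubles the interval, capped at a maximum, so a persistently failing request does not busy-loop. The doubling rule in isolation, with placeholder bounds rather than the driver's constants:

    #include <stdio.h>

    /* Placeholder bounds; the driver defines its own MIN/MAX constants. */
    #define RETRY_TIMER_MIN_MS  (30u * 1000)
    #define RETRY_TIMER_MAX_MS  (300u * 1000)

    #define min_t(type, a, b)  ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned int retry_timer_ms = RETRY_TIMER_MIN_MS;
        int i;

        for (i = 0; i < 6; i++) {
            printf("retry in %u ms\n", retry_timer_ms);
            retry_timer_ms = min_t(unsigned int, retry_timer_ms * 2,
                                   RETRY_TIMER_MAX_MS);
        }
        return 0;
    }
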
1613 struct virtio_mem *vm = vq->vdev->priv;
1615 wake_up(&vm->host_resp);
1618 static int virtio_mem_init_vq(struct virtio_mem *vm)
1622 vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
1626 vm->vq = vq;
1631 static int virtio_mem_init(struct virtio_mem *vm)
1636 if (!vm->vdev->config->get) {
1637 dev_err(&vm->vdev->dev, "config access disabled\n");
1646 dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
1651 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
1652 &vm->plugged_size);
1653 virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
1654 &vm->device_block_size);
1655 virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
1657 vm->nid = virtio_mem_translate_node_id(vm, node_id);
1658 virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
1659 virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
1660 &vm->region_size);
1666 if (vm->device_block_size > memory_block_size_bytes()) {
1667 dev_err(&vm->vdev->dev,
1673 if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
1674 dev_warn(&vm->vdev->dev,
1676 if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
1677 dev_warn(&vm->vdev->dev,
1679 if (vm->addr + vm->region_size > phys_limit)
1680 dev_warn(&vm->vdev->dev,
1689 vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
1691 vm->subblock_size = max_t(uint64_t, vm->device_block_size,
1692 vm->subblock_size);
1693 vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
1696 vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
1698 vm->next_mb_id = vm->first_mb_id;
1699 vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
1700 vm->region_size) - 1;
1702 dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
1703 dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
1704 dev_info(&vm->vdev->dev, "device block size: 0x%llx",
1705 (unsigned long long)vm->device_block_size);
1706 dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
1708 dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
1709 (unsigned long long)vm->subblock_size);
1710 if (vm->nid != NUMA_NO_NODE)
1711 dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
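
The sizing logic in virtio_mem_init() (lines 1689-1700) picks the subblock size as the larger of the device block size and the biggest chunk the page allocator comfortably hands out (MAX_ORDER - 1 pages, or the pageblock order), then rounds the device range inward to whole Linux memory blocks. The same arithmetic with assumed example values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE          4096ull
    #define MAX_ORDER          11              /* typical x86-64 value */
    #define PAGEBLOCK_ORDER    9
    #define MEMORY_BLOCK_SIZE  (128ull << 20)  /* arch dependent */

    #define max_t(type, a, b)  ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        const uint64_t device_block_size = 2ull << 20;  /* from device config */
        const uint64_t addr = 0x140000000ull;           /* from device config */
        const uint64_t region_size = 1ull << 30;        /* from device config */
        uint64_t subblock_size, first_mb_id, last_mb_id;

        /* Biggest allocation the buddy allocator deals in... */
        subblock_size = PAGE_SIZE << max_t(uint32_t, MAX_ORDER - 1,
                                           PAGEBLOCK_ORDER);
        /* ...but never smaller than the device block size. */
        subblock_size = max_t(uint64_t, device_block_size, subblock_size);

        /* Round the device-managed range inward to whole memory blocks. */
        first_mb_id = (addr - 1 + MEMORY_BLOCK_SIZE) / MEMORY_BLOCK_SIZE;
        last_mb_id = (addr + region_size) / MEMORY_BLOCK_SIZE - 1;

        printf("subblock=0x%llx sb/mb=%llu mb=[%llu..%llu]\n",
               (unsigned long long)subblock_size,
               (unsigned long long)(MEMORY_BLOCK_SIZE / subblock_size),
               (unsigned long long)first_mb_id,
               (unsigned long long)last_mb_id);
        return 0;
    }
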
1716 static int virtio_mem_create_resource(struct virtio_mem *vm)
1722 const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
1727 vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
1729 if (!vm->parent_resource) {
1731 dev_warn(&vm->vdev->dev, "could not reserve device region\n");
1732 dev_info(&vm->vdev->dev,
1738 vm->parent_resource->flags &= ~IORESOURCE_BUSY;
1742 static void virtio_mem_delete_resource(struct virtio_mem *vm)
1746 if (!vm->parent_resource)
1749 name = vm->parent_resource->name;
1750 release_resource(vm->parent_resource);
1751 kfree(vm->parent_resource);
1753 vm->parent_resource = NULL;
1758 struct virtio_mem *vm;
1764 vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1765 if (!vm)
1768 init_waitqueue_head(&vm->host_resp);
1769 vm->vdev = vdev;
1770 INIT_WORK(&vm->wq, virtio_mem_run_wq);
1771 mutex_init(&vm->hotplug_mutex);
1772 INIT_LIST_HEAD(&vm->next);
1773 spin_lock_init(&vm->removal_lock);
1774 hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1775 vm->retry_timer.function = virtio_mem_timer_expired;
1776 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
1779 rc = virtio_mem_init_vq(vm);
1784 rc = virtio_mem_init(vm);
1789 rc = virtio_mem_create_resource(vm);
1798 if (vm->plugged_size) {
1799 vm->unplug_all_required = 1;
1800 dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
1804 vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
1805 rc = register_memory_notifier(&vm->memory_notifier);
1808 rc = register_virtio_mem_device(vm);
1815 atomic_set(&vm->config_changed, 1);
1816 queue_work(system_freezable_wq, &vm->wq);
1820 unregister_memory_notifier(&vm->memory_notifier);
1822 virtio_mem_delete_resource(vm);
1826 kfree(vm);
1834 struct virtio_mem *vm = vdev->priv;
1842 mutex_lock(&vm->hotplug_mutex);
1843 spin_lock_irq(&vm->removal_lock);
1844 vm->removing = true;
1845 spin_unlock_irq(&vm->removal_lock);
1846 mutex_unlock(&vm->hotplug_mutex);
1849 cancel_work_sync(&vm->wq);
1850 hrtimer_cancel(&vm->retry_timer);
1856 virtio_mem_for_each_mb_state(vm, mb_id,
1858 rc = virtio_mem_mb_remove(vm, mb_id);
1860 virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
1869 unregister_virtio_mem_device(vm);
1870 unregister_memory_notifier(&vm->memory_notifier);
1877 if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
1878 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
1879 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
1880 vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
1883 virtio_mem_delete_resource(vm);
1884 kfree_const(vm->resource_name);
1888 vfree(vm->mb_state);
1889 vfree(vm->sb_bitmap);
1895 kfree(vm);
1901 struct virtio_mem *vm = vdev->priv;
1903 atomic_set(&vm->config_changed, 1);
1904 virtio_mem_retry(vm);
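
The config-change path above (lines 1901-1904, consumed at lines 1545-1547) is a flag-and-kick pattern: the virtio config-changed callback sets an atomic flag and kicks the workqueue, and the worker clears the flag before re-reading the configuration, so a change arriving mid-refresh simply re-arms another pass. In outline, with C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int config_changed;

    /* Callback side: note the change, let the worker do the heavy lifting
     * (the driver also calls queue_work() here). */
    static void config_changed_cb(void)
    {
        atomic_store(&config_changed, 1);
    }

    /* Worker side: clear first, then read, so a concurrent change re-arms. */
    static void run_wq(void)
    {
        if (atomic_load(&config_changed)) {
            atomic_store(&config_changed, 0);
            printf("refreshing config\n");
        }
    }

    int main(void)
    {
        config_changed_cb();
        run_wq();
        return 0;
    }
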