Lines matching refs: memslot
695 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
697 if (!memslot->dirty_bitmap)
700 kvfree(memslot->dirty_bitmap);
701 memslot->dirty_bitmap = NULL;
716 struct kvm_memory_slot *memslot;
721 kvm_for_each_memslot(memslot, slots)
722 kvm_free_memslot(kvm, memslot);
1009 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1011 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1013 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1014 if (!memslot->dirty_bitmap)
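
The factor of 2 in the __vcalloc() call above is worth a note: the slot's dirty bitmap is allocated together with a second, equally sized region directly behind it, which the dirty-log-protect paths further down use as a scratch buffer (see the kvm_second_dirty_bitmap() calls matched at lines 1598 and 1704). A minimal userspace-style sketch of that layout, with hypothetical helper names and a 64-bit host assumed:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the kernel helpers; the size follows the
     * kvm_dirty_bitmap_bytes() idea: one bit per page, rounded up to a
     * whole number of longs (64-bit host assumed). */
    static size_t dirty_bitmap_bytes(uint64_t npages)
    {
        uint64_t bits = (npages + 63) & ~63ULL;
        return bits / 8;
    }

    static unsigned long *alloc_dirty_bitmap(uint64_t npages)
    {
        /* Two consecutive bitmaps: the live one plus a same-sized scratch area. */
        return calloc(2, dirty_bitmap_bytes(npages));
    }

    static unsigned long *second_dirty_bitmap(unsigned long *bitmap, uint64_t npages)
    {
        return bitmap + dirty_bitmap_bytes(npages) / sizeof(unsigned long);
    }
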
1021 * Delete a memslot by decrementing the number of used slots and shifting all
1025 struct kvm_memory_slot *memslot)
1030 if (WARN_ON(slots->id_to_index[memslot->id] == -1))
1038 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
1042 mslots[i] = *memslot;
1043 slots->id_to_index[memslot->id] = -1;
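
The kvm_memslot_delete() lines matched above only show the loop one line at a time; put together, the idea is a plain delete-by-shift over an array kept sorted by base_gfn (highest first), with id_to_index[] tracking each slot id's position. A simplified, self-contained model of that logic (hypothetical names; not the kernel code itself):

    #include <stdint.h>

    /* Simplified model of the memslot array: entries sorted by base_gfn in
     * descending order, id_to_index[] mapping a slot id to its array position. */
    struct slot {
        uint64_t base_gfn;
        uint64_t npages;
        int id;
    };

    #define NR_SLOTS 32

    struct slot_array {
        struct slot slots[NR_SLOTS];
        int id_to_index[NR_SLOTS];
        int used_slots;
    };

    /* Delete: shift every entry past the victim one step forward and park the
     * now-unused entry at the tail, mirroring kvm_memslot_delete(). */
    static void slot_array_delete(struct slot_array *a, struct slot *victim)
    {
        int i;

        a->used_slots--;
        for (i = a->id_to_index[victim->id]; i < a->used_slots; i++) {
            a->slots[i] = a->slots[i + 1];
            a->id_to_index[a->slots[i].id] = i;
        }
        a->slots[i] = *victim;
        a->id_to_index[victim->id] = -1;
    }
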
1047 * "Insert" a new memslot by incrementing the number of used slots. Returns
1056 * Move a changed memslot backwards in the array by shifting existing slots
1057 * with a higher GFN toward the front of the array. Note, the changed memslot
1059 * its new index into the array is tracked. Returns the changed memslot's
1063 struct kvm_memory_slot *memslot)
1068 if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
1073 * Move the target memslot backward in the array by shifting existing
1074 * memslots with a higher GFN (than the target memslot) towards the
1077 for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
1078 if (memslot->base_gfn > mslots[i + 1].base_gfn)
1081 WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);
1083 /* Shift the next memslot forward one and update its index. */
1091 * Move a changed memslot forwards in the array by shifting existing slots with
1092 * a lower GFN toward the back of the array. Note, the changed memslot itself
1094 * index into the array is tracked. Returns the changed memslot's final index
1098 struct kvm_memory_slot *memslot,
1105 if (memslot->base_gfn < mslots[i - 1].base_gfn)
1108 WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);
1110 /* Shift the next memslot back one and update its index. */
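
Read as whole functions, the two move helpers matched above amount to the following, again over the simplified struct slot_array from the previous sketch (a model of the kernel logic, not the code itself): a backward pass shifts higher-GFN neighbours toward the front, a forward pass shifts lower-GFN neighbours toward the back, and only the changed slot's eventual index is tracked; the copy into that index happens in update_memslots(), matched below.

    /* Shift entries with a higher base_gfn than @changed toward the front of
     * the array; returns the index the changed slot would occupy so far. */
    static int slot_move_backward(struct slot_array *a, struct slot *changed)
    {
        int i;

        for (i = a->id_to_index[changed->id]; i < a->used_slots - 1; i++) {
            if (changed->base_gfn > a->slots[i + 1].base_gfn)
                break;
            a->slots[i] = a->slots[i + 1];
            a->id_to_index[a->slots[i].id] = i;
        }
        return i;
    }

    /* Shift entries with a lower base_gfn than @changed toward the back,
     * starting from the index returned by the backward pass; returns the
     * final index, into which the caller copies the changed slot. */
    static int slot_move_forward(struct slot_array *a, struct slot *changed, int start)
    {
        int i;

        for (i = start; i > 0; i--) {
            if (changed->base_gfn < a->slots[i - 1].base_gfn)
                break;
            a->slots[i] = a->slots[i - 1];
            a->id_to_index[a->slots[i].id] = i;
        }
        return i;
    }
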
1119 * moved memslot. Sorting memslots by GFN allows using a binary search during
1120 * memslot lookup.
1126 * and knowing the position of the changed memslot. Sorting is also optimized
1127 * by not swapping the updated memslot and instead only shifting other memslots
1128 * and tracking the new index for the updated memslot. Only once its final
1129 * index is known is the updated memslot copied into its position in the array.
1131 * - When deleting a memslot, the deleted memslot simply needs to be moved to
1134 * - When creating a memslot, the algorithm "inserts" the new memslot at the
1137 * - When moving a memslot, the algorithm first moves the updated memslot
1138 * backward to handle the scenario where the memslot's GFN was changed to a
1140 * as creating a memslot to move the memslot forward to handle the scenario
1147 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
1150 * performance of memslot lookup. KVM originally used a linear search starting
1151 * at memslots[0]. On x86, the largest memslot usually has one of the highest,
1153 * single memslot above the 4gb boundary. As the largest memslot is also the
1159 struct kvm_memory_slot *memslot,
1165 kvm_memslot_delete(slots, memslot);
1170 i = kvm_memslot_move_backward(slots, memslot);
1171 i = kvm_memslot_move_forward(slots, memslot, i);
1174 * Copy the memslot to its new position in memslots and update
1177 slots->memslots[i] = *memslot;
1178 slots->id_to_index[memslot->id] = i;
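
The sort order that update_memslots() maintains (highest base_gfn at index 0) exists so that gfn lookups can binary-search the array, as the comment block above explains. The lookup helper itself is not among the matched lines (in this vintage it lives in include/linux/kvm_host.h as search_memslots()); below is a simplified model over the same struct slot_array from the earlier sketches, without the last-used-slot cache the kernel keeps:

    /* Binary-search the descending-GFN array for the slot containing @gfn.
     * Finds the first index whose base_gfn is <= gfn, then checks containment. */
    static struct slot *slot_array_search(struct slot_array *a, uint64_t gfn)
    {
        int start = 0, end = a->used_slots, mid;

        while (start < end) {
            mid = start + (end - start) / 2;
            if (gfn >= a->slots[mid].base_gfn)
                end = mid;          /* candidate is at mid or closer to the front */
            else
                start = mid + 1;    /* gfn lies below this slot, look further back */
        }

        if (start < a->used_slots &&
            gfn >= a->slots[start].base_gfn &&
            gfn < a->slots[start].base_gfn + a->slots[start].npages)
            return &a->slots[start];

        return NULL;
    }
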
1209 * Increment the new memslot generation a second time, dropping the
1234 * when deleting a memslot, as we need a complete duplicate of the memslots for
1235 * use when invalidating a memslot prior to deleting/moving the memslot.
1289 * or moved, memslot will be created.
1332 * for a removed memslot.
1386 * Make a full copy of the old memslot, the pointer will become stale
1388 * memslot needs to be referenced after calling update_memslots(), e.g.
1430 /* Copy dirty_bitmap and arch from the current memslot. */
1500 * @memslot: set to the associated memslot, always valid on success
1503 int *is_dirty, struct kvm_memory_slot **memslot)
1510 *memslot = NULL;
1519 *memslot = id_to_memslot(slots, id);
1520 if (!(*memslot) || !(*memslot)->dirty_bitmap)
1523 kvm_arch_sync_dirty_log(kvm, *memslot);
1525 n = kvm_dirty_bitmap_bytes(*memslot);
1528 any = (*memslot)->dirty_bitmap[i];
1530 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
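
The dirty-log fetch path shown above (kvm_get_dirty_log() in this vintage) is driven from userspace by a single ioctl on the VM fd with a struct kvm_dirty_log naming the slot and supplying a buffer of one bit per page. A minimal sketch; vm_fd, slot_id and npages are assumed to come from the caller's existing KVM setup, and on architectures with multiple address spaces the slot field carries the address-space id in its upper 16 bits:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Fetch the dirty bitmap for one memslot.  Returns a malloc'd bitmap the
     * caller must free, or NULL on failure. */
    static unsigned long *get_dirty_log(int vm_fd, uint32_t slot_id, uint64_t npages)
    {
        size_t bytes = ((npages + 63) / 64) * 8;    /* one bit per page, long-aligned */
        unsigned long *bitmap = calloc(1, bytes);
        struct kvm_dirty_log log = {
            .slot = slot_id,
            .dirty_bitmap = bitmap,
        };

        if (!bitmap)
            return NULL;
        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
            free(bitmap);
            return NULL;
        }
        return bitmap;
    }
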
1564 struct kvm_memory_slot *memslot;
1577 memslot = id_to_memslot(slots, id);
1578 if (!memslot || !memslot->dirty_bitmap)
1581 dirty_bitmap = memslot->dirty_bitmap;
1583 kvm_arch_sync_dirty_log(kvm, memslot);
1585 n = kvm_dirty_bitmap_bytes(memslot);
1598 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1614 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1621 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
1671 struct kvm_memory_slot *memslot;
1688 memslot = id_to_memslot(slots, id);
1689 if (!memslot || !memslot->dirty_bitmap)
1692 dirty_bitmap = memslot->dirty_bitmap;
1696 if (log->first_page > memslot->npages ||
1697 log->num_pages > memslot->npages - log->first_page ||
1698 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
1701 kvm_arch_sync_dirty_log(kvm, memslot);
1704 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1721 * never includes any bits beyond the length of the memslot (if
1727 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
1734 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
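
kvm_clear_dirty_log_protect() above is the kernel side of KVM_CLEAR_DIRTY_LOG, which lets userspace re-protect pages in chunks after consuming their dirty bits; it requires the manual-protect capability (KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2) to be enabled on the VM. The range checks matched at lines 1696-1698 become the constraints noted in the sketch below; vm_fd and slot_id are again assumed to come from existing setup:

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    /* Re-protect [first_page, first_page + num_pages) of a memslot.
     * first_page must be a multiple of 64, and num_pages must be a multiple
     * of 64 unless the range runs to the end of the slot; bit x of *bitmap
     * refers to page first_page + x. */
    static int clear_dirty_log(int vm_fd, uint32_t slot_id, uint64_t first_page,
                               uint32_t num_pages, unsigned long *bitmap)
    {
        struct kvm_clear_dirty_log clear = {
            .slot = slot_id,
            .num_pages = num_pages,
            .first_page = first_page,
            .dirty_bitmap = bitmap,
        };

        return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
    }
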
1766 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1768 return kvm_is_visible_memslot(memslot);
1774 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1776 return kvm_is_visible_memslot(memslot);
2122 /* Do not map writable pfn in the readonly memslot. */
2306 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
2331 mark_page_dirty_in_slot(memslot, map->gfn);
2540 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
2546 addr = gfn_to_hva_memslot(memslot, gfn);
2552 mark_page_dirty_in_slot(memslot, gfn);
2639 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2640 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2650 ghc->memslot = NULL;
2684 if (unlikely(!ghc->memslot))
2690 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
2722 if (unlikely(!ghc->memslot))
2767 void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn)
2769 if (memslot && memslot->dirty_bitmap) {
2770 unsigned long rel_gfn = gfn - memslot->base_gfn;
2772 set_bit_le(rel_gfn, memslot->dirty_bitmap);
2779 struct kvm_memory_slot *memslot;
2781 memslot = gfn_to_memslot(kvm, gfn);
2782 mark_page_dirty_in_slot(memslot, gfn);
2788 struct kvm_memory_slot *memslot;
2790 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2791 mark_page_dirty_in_slot(memslot, gfn);
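
mark_page_dirty_in_slot() above records a page as dirty at bit (gfn - base_gfn) of the slot's bitmap using set_bit_le(), giving the bitmap a little-endian byte layout. A consumer-side sketch of reading such a bit back, assuming that layout; byte-wise access keeps it independent of the host's word endianness:

    #include <stdbool.h>
    #include <stdint.h>

    /* Test the dirty bit for @gfn in a slot whose first page is @base_gfn.
     * Bit n of the (little-endian) bitmap covers page base_gfn + n. */
    static bool page_is_dirty(const uint8_t *dirty_bitmap,
                              uint64_t base_gfn, uint64_t gfn)
    {
        uint64_t rel_gfn = gfn - base_gfn;

        return dirty_bitmap[rel_gfn / 8] & (1u << (rel_gfn % 8));
    }
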