Lines Matching refs:memslot
383 const struct kvm_memory_slot *memslot)
386 * All current use cases for flushing the TLBs for a specific memslot
388 * mmu_lock. The interaction between the various operations on memslot
390 * operation is observed by any other operation on the same memslot.
393 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
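The hits at source lines 383-393 above come from a helper that flushes remote TLBs for a single memslot. A minimal sketch of that shape follows; the wrapper name is illustrative, while kvm_flush_remote_tlbs_range() and the base_gfn/npages fields are taken straight from the listing.

#include <linux/kvm_host.h>

/*
 * Sketch: flushing the TLBs for a whole memslot reduces to a ranged flush
 * over [base_gfn, base_gfn + npages). Per the comment quoted above, the
 * callers are dirty-logging paths that may flush outside of mmu_lock, so
 * memslot operations have to be serialized by a higher-level lock.
 */
static void flush_tlbs_for_memslot(struct kvm *kvm,
                                   const struct kvm_memory_slot *memslot)
{
        kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}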
577 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
705 * unmap the memslot instead of skipping the memslot to ensure that KVM
729 * invalidations, including this one, found a relevant memslot at
787 * Prevent memslot modification between range_start() and range_end()
803 * i.e. don't need to rely on memslot overlap checks for performance.
978 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
980 if (!memslot->dirty_bitmap)
983 kvfree(memslot->dirty_bitmap);
984 memslot->dirty_bitmap = NULL;
1000 struct kvm_memory_slot *memslot;
1004 * The same memslot objects live in both active and inactive sets,
1012 hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1013 kvm_free_memslot(kvm, memslot);
1404 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1406 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1408 memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1409 if (!memslot->dirty_bitmap)
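Source lines 978-984 and 1404-1409 above are the two ends of the per-slot dirty bitmap lifecycle. A sketch of both, with illustrative function names; kvm_dirty_bitmap_bytes(), __vcalloc() and the dirty_bitmap field appear in the listing, the rest is assumed.

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Sketch: two bitmaps' worth of memory are allocated in one call; the
 * second half serves as a scratch buffer, reached in the listing via
 * kvm_second_dirty_bitmap() in the dirty-log-protect paths (source lines
 * 2186 and 2296).
 */
static int slot_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

        memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
        if (!memslot->dirty_bitmap)
                return -ENOMEM;

        return 0;
}

/* Sketch: freeing is idempotent thanks to the NULL check and the reset. */
static void slot_free_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        if (!memslot->dirty_bitmap)
                return;

        kvfree(memslot->dirty_bitmap);
        memslot->dirty_bitmap = NULL;
}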
1424 * Helper to get the address space ID when one of memslot pointers may be NULL.
1525 * (Re)Add the new memslot. There is no O(1) interval_tree_replace(),
1533 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1599 * Increment the new memslot generation a second time, dropping the
1666 * Update the total number of memslot pages before calling the arch
1687 /* Free the old memslot and all its metadata. */
1728 /* Propagate the new memslot to the now inactive memslots. */
1750 * Mark the current slot INVALID. As with all memslot modifications,
1767 * memslot will be created. Validation of sp->gfn happens in:
1790 /* Add the new memslot to the inactive set and activate. */
1800 * Remove the old memslot (in the inactive memslots) by passing NULL as
1813 * Replace the old memslot in the inactive slots, and then swap slots
1826 * an intermediate step. Instead, the old memslot is simply replaced
1827 * with a new, updated copy in both memslot sets.
1859 * done prior to actually deleting/moving the memslot to allow vCPUs to
1861 * for the memslot when it is deleted/moved. Without pre-invalidation
1864 * guest could access a non-existent memslot.
1920 * will directly hit the final, active memslot. Architectures are
1989 * Note, the old memslot (and the pointer itself!) may be invalidated
2034 /* Allocate a slot that will persist in the memslot. */
2080 * @memslot: set to the associated memslot, always valid on success
2083 int *is_dirty, struct kvm_memory_slot **memslot)
2094 *memslot = NULL;
2103 *memslot = id_to_memslot(slots, id);
2104 if (!(*memslot) || !(*memslot)->dirty_bitmap)
2107 kvm_arch_sync_dirty_log(kvm, *memslot);
2109 n = kvm_dirty_bitmap_bytes(*memslot);
2112 any = (*memslot)->dirty_bitmap[i];
2114 if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
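Source lines 2080-2114 above belong to the plain dirty-log fetch path. A condensed sketch of that flow, with an illustrative name and error codes; the high 16 bits of log->slot selecting the address space is an inference from the (as_id << 16) packing seen at source line 3365, and the remaining helpers are used as they appear in the listing.

#include <linux/kvm_host.h>
#include <linux/uaccess.h>

static int fetch_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                           int *is_dirty, struct kvm_memory_slot **memslot)
{
        struct kvm_memslots *slots;
        unsigned long any = 0;
        unsigned long n;
        int i;

        *memslot = NULL;
        *is_dirty = 0;

        /* Assumption: address space id in the high bits, slot id below. */
        slots = __kvm_memslots(kvm, log->slot >> 16);
        *memslot = id_to_memslot(slots, (u16)log->slot);
        if (!(*memslot) || !(*memslot)->dirty_bitmap)
                return -ENOENT;

        /* Let the architecture push pending dirty state into the bitmap. */
        kvm_arch_sync_dirty_log(kvm, *memslot);

        n = kvm_dirty_bitmap_bytes(*memslot);
        for (i = 0; !any && i < n / sizeof(long); ++i)
                any = (*memslot)->dirty_bitmap[i];

        if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
                return -EFAULT;

        if (any)
                *is_dirty = 1;
        return 0;
}

The protect variants at source lines 2148-2326 below extend this by copying through the second bitmap and re-protecting pages via kvm_arch_mmu_enable_log_dirty_pt_masked() before flushing the TLBs for the slot.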
2148 struct kvm_memory_slot *memslot;
2165 memslot = id_to_memslot(slots, id);
2166 if (!memslot || !memslot->dirty_bitmap)
2169 dirty_bitmap = memslot->dirty_bitmap;
2171 kvm_arch_sync_dirty_log(kvm, memslot);
2173 n = kvm_dirty_bitmap_bytes(memslot);
2186 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2202 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2209 kvm_flush_remote_tlbs_memslot(kvm, memslot);
2259 struct kvm_memory_slot *memslot;
2280 memslot = id_to_memslot(slots, id);
2281 if (!memslot || !memslot->dirty_bitmap)
2284 dirty_bitmap = memslot->dirty_bitmap;
2288 if (log->first_page > memslot->npages ||
2289 log->num_pages > memslot->npages - log->first_page ||
2290 (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2293 kvm_arch_sync_dirty_log(kvm, memslot);
2296 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2313 * never includes any bits beyond the length of the memslot (if
2319 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2326 kvm_flush_remote_tlbs_memslot(kvm, memslot);
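The range check quoted at source lines 2288-2290 is the subtle part of the clear-dirty-log path. Restated as a standalone predicate (illustrative name), it requires the range to fit inside the slot and num_pages to be a multiple of 64 unless the range runs exactly to the end of the slot, since the user-supplied mask is handled in 64-bit (64-page) words.

#include <linux/types.h>

static bool clear_range_is_valid(u64 first_page, u32 num_pages, u64 npages)
{
        if (first_page > npages)
                return false;
        if (num_pages > npages - first_page)
                return false;
        /* A short final chunk is only allowed at the very end of the slot. */
        if (num_pages < npages - first_page && (num_pages & 63))
                return false;
        return true;
}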
2358 * This also protects against using a memslot from a different address space,
2386 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2388 return kvm_is_visible_memslot(memslot);
2394 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2396 return kvm_is_visible_memslot(memslot);
2767 /* Do not map writable pfn in the readonly memslot. */
2837 * backed by 'struct page'. A valid example is if the backing memslot is
3130 struct kvm_memory_slot *memslot, gfn_t gfn,
3136 addr = gfn_to_hva_memslot(memslot, gfn);
3142 mark_page_dirty_in_slot(kvm, memslot, gfn);
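Source lines 3130-3142 above show the slot-level guest-write pattern: translate the gfn to a host virtual address within the slot, copy, then record the page as dirty in that slot. A sketch with an illustrative name; the copy step does not appear in the listing and is an assumption about what sits between the lookup and the dirty marking.

#include <linux/kvm_host.h>
#include <linux/uaccess.h>

static int write_guest_page_in_slot(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot, gfn_t gfn,
                                    const void *data, int offset, int len)
{
        unsigned long addr;

        addr = gfn_to_hva_memslot(memslot, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;

        /* Assumed copy step: the slot's hva is a userspace address. */
        if (__copy_to_user((void __user *)addr + offset, data, len))
                return -EFAULT;

        mark_page_dirty_in_slot(kvm, memslot, gfn);
        return 0;
}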
3229 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3230 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3240 ghc->memslot = NULL;
3274 if (unlikely(!ghc->memslot))
3280 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3312 if (unlikely(!ghc->memslot))
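Source lines 3229-3312 above are from the gfn_to_hva_cache helpers: the cache remembers the memslot and host virtual address for a guest address so repeated accesses skip the memslot lookup, and a NULL cached memslot marks the cache as unusable. A sketch of a write through such a cache; the name, the gpa field, and the fallback to kvm_write_guest() are assumptions beyond what the listing shows.

#include <linux/kvm_host.h>
#include <linux/uaccess.h>

static int write_through_hva_cache(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                   const void *data, unsigned int offset,
                                   unsigned long len)
{
        gpa_t gpa = ghc->gpa + offset;

        /* No cached slot: fall back to the uncached write path. */
        if (unlikely(!ghc->memslot))
                return kvm_write_guest(kvm, gpa, data, len);

        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;

        if (__copy_to_user((void __user *)ghc->hva + offset, data, len))
                return -EFAULT;

        /* Writes through the cache still dirty the page in the cached slot. */
        mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
        return 0;
}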
3351 const struct kvm_memory_slot *memslot,
3363 if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3364 unsigned long rel_gfn = gfn - memslot->base_gfn;
3365 u32 slot = (memslot->as_id << 16) | memslot->id;
3369 else if (memslot->dirty_bitmap)
3370 set_bit_le(rel_gfn, memslot->dirty_bitmap);
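Source lines 3351-3370 above carry the core dirty-marking logic: the gfn is turned into an offset inside the slot, and the slot identity is packed as (address space id << 16) | slot id. A sketch with an illustrative name; the dirty-ring branch that normally consumes the packed id is assumed context and elided here, leaving only the bitmap fallback.

#include <linux/kvm_host.h>
#include <linux/bitops.h>

static void record_dirty_page(struct kvm *kvm,
                              const struct kvm_memory_slot *memslot, gfn_t gfn)
{
        if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
                u32 slot = (memslot->as_id << 16) | memslot->id;

                /* The packed id would tag a dirty-ring entry; push elided. */
                (void)slot;

                if (memslot->dirty_bitmap)
                        set_bit_le(rel_gfn, memslot->dirty_bitmap);
        }
}

The remaining hits below (source lines 3377-3389) are the thin wrappers that resolve a gfn to its memslot and call into this helper.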
3377 struct kvm_memory_slot *memslot;
3379 memslot = gfn_to_memslot(kvm, gfn);
3380 mark_page_dirty_in_slot(kvm, memslot, gfn);
3386 struct kvm_memory_slot *memslot;
3388 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3389 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);