Lines matching refs: memslot

655 					    struct kvm_memory_slot *memslot,
660 struct kvm_memory_slot *memslot;
666 kvm_for_each_memslot(memslot, slots) {
670 hva_start = max(start, memslot->userspace_addr);
671 hva_end = min(end, memslot->userspace_addr +
672 (memslot->npages << PAGE_SHIFT));
680 gfn = hva_to_gfn_memslot(hva_start, memslot);
681 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
682 ret |= handler(kvm, gfn, gfn_end, memslot, data);
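The fragments at 655-682 are the memslot walk of an hva-range handler: each memslot's user address range is clipped against [start, end), converted to a gfn range, and handed to a callback. A minimal sketch of that pattern, assuming the handle_hva_to_gpa name and the hva_handler_fn typedef used by similar KVM ports (neither name is confirmed by this listing):

#include <linux/kvm_host.h>

/* Hypothetical callback type; the listing only shows its call site. */
typedef int (*hva_handler_fn)(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
			      struct kvm_memory_slot *memslot, void *data);

static int handle_hva_to_gpa(struct kvm *kvm, unsigned long start,
			     unsigned long end, hva_handler_fn handler,
			     void *data)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot;
	int ret = 0;

	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		/* Clip the requested hva range against this memslot. */
		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/* {gfn, gfn_end} covers [hva_start, hva_end) in this slot. */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		ret |= handler(kvm, gfn, gfn_end, memslot, data);
	}

	return ret;
}

The two-argument kvm_for_each_memslot() at 666 implies the pre-5.17 array-based memslots layout, which is what the sketch assumes.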
690 struct kvm_memory_slot *memslot, void *data)
710 struct kvm_memory_slot *memslot, void *data)
720 /* Mapping may need adjusting depending on memslot flags */
722 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
724 else if (memslot->flags & KVM_MEM_READONLY)
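Lines 710-724 sit inside a set-spte style handler whose comment at 720 notes that the mapping may need adjusting for the memslot's flags. A hedged sketch of just that adjustment, under the assumption (suggested by the pte_dirty() check at 722) that a clean page under dirty logging is installed clean so its first write still faults and can be logged, while KVM_MEM_READONLY slots are write-protected; the helper name is illustrative, not a function shown in the listing:

#include <linux/kvm_host.h>
#include <linux/pgtable.h>

/* Illustrative helper: degrade a PTE according to the memslot flags. */
static pte_t adjust_pte_for_memslot(struct kvm_memory_slot *memslot,
				    pte_t pte, pte_t old_pte)
{
	/* Keep the page clean so the next write faults and gets logged. */
	if ((memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) && !pte_dirty(old_pte))
		pte = pte_mkclean(pte);
	/* Read-only memslots must never be mapped writable. */
	else if (memslot->flags & KVM_MEM_READONLY)
		pte = pte_wrprotect(pte);

	return pte;
}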
753 struct kvm_memory_slot *memslot, void *data)
760 struct kvm_memory_slot *memslot, void *data)
935 static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
945 if ((memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) && write)
948 size = memslot->npages * PAGE_SIZE;
949 gpa_start = memslot->base_gfn << PAGE_SHIFT;
950 uaddr_start = memslot->userspace_addr;
960 * memslot->userspace_addr:
965 * memslot->base_gfn << PAGE_SHIFT:
981 * by the memslot. This means we have to prohibit block size mappings
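The fragments at 935-981 belong to fault_supports_huge_mapping(), which refuses block mappings when a write fault hits a dirty-logged slot, when the hva and gpa are not equally aligned within a block, or when the block would spill outside the memslot (the situation the comment around 960-981 describes). A sketch of that check, assuming PMD-sized blocks and a simplified containment test at the slot edges; the real function may handle more block sizes:

#include <linux/kvm_host.h>
#include <linux/pgtable.h>

static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
					unsigned long hva, bool write)
{
	size_t size;
	gpa_t gpa_start;
	unsigned long uaddr_start, uaddr_end;

	/* Dirty logging needs write faults at PAGE_SIZE granularity. */
	if ((memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) && write)
		return false;

	size = memslot->npages * PAGE_SIZE;
	gpa_start = memslot->base_gfn << PAGE_SHIFT;
	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * The hva and gpa must share the same offset within a block;
	 * otherwise a single stage-2 block would map the wrong host pages.
	 */
	if ((gpa_start & (PMD_SIZE - 1)) != (uaddr_start & (PMD_SIZE - 1)))
		return false;

	/* The whole block around hva must lie inside the memslot. */
	return (hva & PMD_MASK) >= uaddr_start &&
	       (hva & PMD_MASK) + PMD_SIZE <= uaddr_end;
}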
1136 struct kvm_memory_slot *memslot;
1145 memslot = gfn_to_memslot(kvm, gfn);
1146 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
1162 !fault_supports_huge_mapping(memslot, hva, write)) {
1231 * aligned and that the block is contained within the memslot.
1234 if (fault_supports_huge_mapping(memslot, hva, write) &&
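The fault-path fragments at 1136-1234 pull the pieces together: the faulting gfn is resolved to its memslot and hva, and the block-mapping decision is gated on fault_supports_huge_mapping(). A condensed sketch of that flow; the function name, error handling, and the actual mapping calls are placeholders, not code from the listing:

#include <linux/kvm_host.h>
#include <linux/errno.h>

static int handle_guest_fault_sketch(struct kvm *kvm, gpa_t gpa, bool write)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool writeable;

	/* Resolve the faulting gfn to its memslot and host virtual address. */
	memslot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
	if (kvm_is_error_hva(hva))
		return -EFAULT;

	if (fault_supports_huge_mapping(memslot, hva, write)) {
		/*
		 * hva and gpa are equally aligned within a block and the
		 * block is contained in the memslot, so a block-sized
		 * (e.g. PMD) mapping may be installed here.
		 */
		/* ... map a block ... */
	} else {
		/* ... fall back to a PAGE_SIZE mapping ... */
	}

	return 0;
}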