Lines matching refs: memslot
70 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
72 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
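The fragment at lines 70-72 is the dirty-logging test: a slot counts as "logging" when it has a dirty bitmap allocated and is not read-only. Below is a minimal userspace sketch of that predicate; struct memslot_stub mirrors only the two fields the check reads, and the flag values follow the KVM UAPI bits as an assumption for the illustration, not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Assumed UAPI-style flag bits, for illustration only. */
#define KVM_MEM_LOG_DIRTY_PAGES (1u << 0)
#define KVM_MEM_READONLY        (1u << 1)

/* Stand-in slot: only the two fields the predicate reads. */
struct memslot_stub {
	unsigned long *dirty_bitmap;	/* allocated once dirty logging is enabled */
	unsigned int flags;
};

/* A slot is "logging" if it tracks dirty pages and can actually be written. */
static bool memslot_is_logging(const struct memslot_stub *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

int main(void)
{
	unsigned long bitmap[1] = { 0 };
	struct memslot_stub logging  = { bitmap, KVM_MEM_LOG_DIRTY_PAGES };
	struct memslot_stub readonly = { bitmap, KVM_MEM_READONLY };
	struct memslot_stub plain    = { NULL, 0 };

	printf("%d %d %d\n", memslot_is_logging(&logging),
	       memslot_is_logging(&readonly),
	       memslot_is_logging(&plain));	/* prints: 1 0 0 */
	return 0;
}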
145 struct kvm_memory_slot *memslot)
147 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
148 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
163 struct kvm_memory_slot *memslot;
170 kvm_for_each_memslot(memslot, slots)
171 stage2_flush_memslot(kvm, memslot);
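Lines 147-148 derive the guest-physical (IPA) range a slot covers from base_gfn and npages before walking it, and lines 170-171 apply that per-slot flush to every slot in the address space. The arithmetic is sketched below assuming a 4 KiB PAGE_SIZE; it also shows that the form used at lines 567-568 further down, (base_gfn + npages) << PAGE_SHIFT, yields the same end address. The struct and example values are illustrative stand-ins.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assuming 4 KiB pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

struct memslot_stub {
	unsigned long base_gfn;		/* first guest frame number */
	unsigned long npages;		/* slot size in pages */
};

int main(void)
{
	struct memslot_stub slot = { .base_gfn = 0x80000, .npages = 0x4000 };

	/* Form used at lines 147-148. */
	unsigned long addr = slot.base_gfn << PAGE_SHIFT;
	unsigned long end  = addr + PAGE_SIZE * slot.npages;

	/* Equivalent form used at lines 567-568. */
	unsigned long end2 = (slot.base_gfn + slot.npages) << PAGE_SHIFT;

	printf("IPA range [%#lx, %#lx), ends agree: %d\n",
	       addr, end, end == end2);	/* [0x80000000, 0x84000000), ends agree: 1 */
	return 0;
}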
404 struct kvm_memory_slot *memslot)
406 hva_t hva = memslot->userspace_addr;
407 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
408 phys_addr_t size = PAGE_SIZE * memslot->npages;
437 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
454 struct kvm_memory_slot *memslot;
462 kvm_for_each_memslot(memslot, slots)
463 stage2_unmap_memslot(kvm, memslot);
561 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
564 if (WARN_ON_ONCE(!memslot))
567 start = memslot->base_gfn << PAGE_SHIFT;
568 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
627 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
635 /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
639 size = memslot->npages * PAGE_SIZE;
641 gpa_start = memslot->base_gfn << PAGE_SHIFT;
643 uaddr_start = memslot->userspace_addr;
653 * memslot->userspace_addr:
658 * memslot->base_gfn << PAGE_SHIFT:
674 * by the memslot. This means we have to prohibit block size mappings
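Lines 627-674 belong to fault_supports_stage2_huge_mapping(), which decides whether a stage-2 block (huge) mapping is safe: the slot's guest-physical and userspace start addresses must sit at the same offset within a block, and the block containing the faulting hva must lie entirely inside the slot. A userspace sketch of those two rules follows; the struct, constants and the helper name supports_block_mapping are stand-ins for illustration, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assuming 4 KiB pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PMD_SIZE   (1ul << 21)		/* 2 MiB block, typical with 4 KiB pages */

struct memslot_stub {
	unsigned long userspace_addr;	/* hva of the slot, page aligned */
	unsigned long base_gfn;		/* first guest frame number */
	unsigned long npages;
};

/*
 * A block mapping of size map_size covering hva is only usable if:
 *  1. the gpa and hva views of the slot are equally aligned within a block
 *     (otherwise a block entry would map the wrong pages), and
 *  2. the block containing hva lies entirely inside the slot.
 */
static bool supports_block_mapping(const struct memslot_stub *memslot,
				   unsigned long hva, unsigned long map_size)
{
	unsigned long size = memslot->npages * PAGE_SIZE;
	unsigned long gpa_start = memslot->base_gfn << PAGE_SHIFT;
	unsigned long uaddr_start = memslot->userspace_addr;
	unsigned long uaddr_end = uaddr_start + size;

	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

int main(void)
{
	/* hva and gpa both 2 MiB aligned: blocks are fine away from the slot edges. */
	struct memslot_stub aligned = { 0x7f0000000000ul, 0x80000, 0x4000 };
	/* hva skewed by one page relative to gpa: no block mappings at all. */
	struct memslot_stub skewed  = { 0x7f0000001000ul, 0x80000, 0x4000 };

	printf("aligned: %d, skewed: %d\n",
	       supports_block_mapping(&aligned, 0x7f0000200000ul, PMD_SIZE),
	       supports_block_mapping(&skewed,  0x7f0000200000ul, PMD_SIZE));
	return 0;				/* prints: aligned: 1, skewed: 0 */
}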
697 transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
706 * block map is contained within the memslot.
709 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
742 struct kvm_memory_slot *memslot, unsigned long hva,
756 bool logging_active = memslot_is_logging(memslot);
795 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
803 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
880 vma_pagesize = transparent_hugepage_adjust(memslot, hva,
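Lines 795, 803 and 880 show user_mem_abort() choosing the mapping granule: try a PUD-sized block, fall back to a PMD-sized one, otherwise map a single page (with the THP adjustment at line 880 possibly promoting a page back up to PMD size later). A compact sketch of that descending-size selection; block_ok() keeps only the alignment rule from the previous sketch for brevity, and all names and constants here are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assuming 4 KiB pages */
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define PMD_SIZE   (1ul << 21)		/* 2 MiB */
#define PUD_SIZE   (1ul << 30)		/* 1 GiB */

struct memslot_stub {
	unsigned long userspace_addr;
	unsigned long base_gfn;
};

/* Simplified stand-in for the huge-mapping check: alignment rule only,
 * the in-slot boundary rule is omitted to keep the sketch short. */
static bool block_ok(const struct memslot_stub *s, unsigned long map_size)
{
	unsigned long gpa_start = s->base_gfn << PAGE_SHIFT;

	return (gpa_start & (map_size - 1)) == (s->userspace_addr & (map_size - 1));
}

/* Try the largest granule first, as the fragments at lines 795/803 do. */
static unsigned long pick_map_size(const struct memslot_stub *s)
{
	if (block_ok(s, PUD_SIZE))
		return PUD_SIZE;
	if (block_ok(s, PMD_SIZE))
		return PMD_SIZE;
	return PAGE_SIZE;
}

int main(void)
{
	struct memslot_stub gig_aligned = { 0x7f8000000000ul, 0x40000000ul >> PAGE_SHIFT };
	struct memslot_stub pmd_aligned = { 0x7f8000200000ul, 0x40000000ul >> PAGE_SHIFT };
	struct memslot_stub page_only   = { 0x7f8000001000ul, 0x40000000ul >> PAGE_SHIFT };

	printf("%#lx %#lx %#lx\n", pick_map_size(&gig_aligned),
	       pick_map_size(&pmd_aligned), pick_map_size(&page_only));
	return 0;				/* prints: 0x40000000 0x200000 0x1000 */
}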
955 struct kvm_memory_slot *memslot;
994 memslot = gfn_to_memslot(vcpu->kvm, gfn);
995 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1051 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1073 struct kvm_memory_slot *memslot;
1079 kvm_for_each_memslot(memslot, slots) {
1083 hva_start = max(start, memslot->userspace_addr);
1084 hva_end = min(end, memslot->userspace_addr +
1085 (memslot->npages << PAGE_SHIFT));
1089 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
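Lines 1079-1089 are the per-slot step of the hva-range handlers used by the MMU notifiers: intersect the incoming [start, end) host-virtual range with the slot's userspace range, then turn the start of the overlap into a guest-physical address by adding its page offset within the slot to base_gfn, which matches how hva_to_gfn_memslot is defined in the generic KVM headers. A sketch of that clamp-and-translate step; the struct, the hva_to_gfn helper and the example addresses are stand-ins for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assuming 4 KiB pages */

struct memslot_stub {
	unsigned long userspace_addr;	/* hva where the slot is mapped */
	unsigned long base_gfn;
	unsigned long npages;
};

/* Offset of hva inside the slot, expressed as a guest frame number. */
static unsigned long hva_to_gfn(unsigned long hva, const struct memslot_stub *s)
{
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot_stub slot = {
		.userspace_addr = 0x7f0000000000ul,
		.base_gfn = 0x80000,		/* slot starts at IPA 0x80000000 */
		.npages = 0x4000,		/* 64 MiB */
	};
	/* An MMU-notifier style invalidation range in host virtual addresses. */
	unsigned long start = 0x7f0000100000ul, end = 0x7f2000000000ul;

	unsigned long slot_end = slot.userspace_addr + (slot.npages << PAGE_SHIFT);
	unsigned long hva_start = start > slot.userspace_addr ? start : slot.userspace_addr;
	unsigned long hva_end = end < slot_end ? end : slot_end;

	if (hva_start >= hva_end) {
		puts("no overlap with this slot");
		return 0;
	}

	unsigned long gpa = hva_to_gfn(hva_start, &slot) << PAGE_SHIFT;
	printf("handle gpa range [%#lx, %#lx)\n",
	       gpa, gpa + (hva_end - hva_start));	/* [0x80100000, 0x84000000) */
	return 0;
}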
1278 * At this point memslot has been committed and there is an
1295 struct kvm_memory_slot *memslot,
1312 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
1350 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1371 stage2_flush_memslot(kvm, memslot);
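Line 1312 is the bounds check in kvm_arch_prepare_memory_region(): a new or moved slot must not extend past the guest physical address space the VM was configured with (kvm_phys_size(kvm)). Lines 1350 and 1371 show the other memslot touches on that path, the dirty-logging flag check and the final stage-2 flush. The bounds check is sketched below, assuming a 40-bit IPA space and 4 KiB pages purely for the example; the struct and helper names are stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assuming 4 KiB pages */

struct memslot_stub {
	unsigned long base_gfn;
	unsigned long npages;
};

/* Reject slots whose last page would lie outside the guest PA space. */
static bool slot_fits(const struct memslot_stub *s, unsigned long phys_size)
{
	return (s->base_gfn + s->npages) <= (phys_size >> PAGE_SHIFT);
}

int main(void)
{
	unsigned long phys_size = 1ul << 40;	/* assuming a 40-bit IPA space */
	struct memslot_stub ok  = { .base_gfn = 0x80000,    .npages = 0x4000 };
	struct memslot_stub bad = { .base_gfn = 0xffffff00, .npages = 0x200 };

	printf("ok fits: %d, bad fits: %d\n",
	       slot_fits(&ok, phys_size),
	       slot_fits(&bad, phys_size));	/* prints: ok fits: 1, bad fits: 0 */
	return 0;
}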