Lines matching refs:memslot (each match below is prefixed with its line number in the searched source file)

158 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
160 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
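These two matches are the signature and return statement of a small predicate; with the elided brace lines filled in, the whole helper reads as follows (a reconstruction from the matched lines, not a verbatim copy of the source):

    static bool memslot_is_logging(struct kvm_memory_slot *memslot)
    {
            /* Logging is active iff the slot has a dirty bitmap and is writable. */
            return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
    }

Read-only slots are never dirty-logged, which is why KVM_MEM_READONLY is checked alongside dirty_bitmap.
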
337 struct kvm_memory_slot *memslot)
339 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
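Lines 339-340 show the idiom this file uses throughout to turn a memslot into a guest-physical address range. Below is a self-contained illustration of the arithmetic with made-up values; struct slot is a hypothetical stand-in for the relevant kvm_memory_slot fields, and PAGE_SHIFT is assumed to be 12 (4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    /* Hypothetical stand-in for the addressing fields of struct kvm_memory_slot. */
    struct slot {
            uint64_t base_gfn; /* first guest frame number covered by the slot */
            uint64_t npages;   /* slot size in pages */
    };

    int main(void)
    {
            struct slot s = { .base_gfn = 0x40000, .npages = 512 };

            /* base_gfn is a frame number, so shifting by PAGE_SHIFT yields
             * the slot's base guest-physical address (as on line 339). */
            uint64_t addr = s.base_gfn << PAGE_SHIFT;
            uint64_t end  = addr + PAGE_SIZE * s.npages;   /* line 340 */

            printf("flush range [%#llx, %#llx)\n",
                   (unsigned long long)addr, (unsigned long long)end);
            return 0;
    }
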
355 struct kvm_memory_slot *memslot;
362 kvm_for_each_memslot(memslot, bkt, slots)
363 stage2_flush_memslot(kvm, memslot);
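Lines 355-363 are from the caller that applies the flush to the whole VM. A sketch of the loop, reconstructed around the matches (the kvm_memslots() lookup is filled in from memory, bkt is the iterator's bucket cursor, and the locking in the real function is omitted):

    static void stage2_flush_vm(struct kvm *kvm)
    {
            struct kvm_memslots *slots = kvm_memslots(kvm);
            struct kvm_memory_slot *memslot;
            int bkt;

            /* Visit every slot and flush its stage-2 range. */
            kvm_for_each_memslot(memslot, bkt, slots)
                    stage2_flush_memslot(kvm, memslot);
    }
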
942 struct kvm_memory_slot *memslot)
944 hva_t hva = memslot->userspace_addr;
945 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
946 phys_addr_t size = PAGE_SIZE * memslot->npages;
976 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
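Line 976 is the HVA-to-GPA translation at the heart of this unmap helper: a slot maps a contiguous userspace range onto a contiguous guest-physical range, so any host address inside it converts by a fixed offset. A condensed sketch of the loop body around it (the VMA walk is abbreviated from memory and may differ by kernel version):

    /* Clamp each VMA overlapping the slot to the slot's HVA range. */
    vm_start = max(hva, vma->vm_start);
    vm_end = min(reg_end, vma->vm_end);

    if (!(vma->vm_flags & VM_PFNMAP)) {
            /* Same offset into the slot on the HVA and GPA sides. */
            gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
            unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
    }
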
993 struct kvm_memory_slot *memslot;
1001 kvm_for_each_memslot(memslot, bkt, slots)
1002 stage2_unmap_memslot(kvm, memslot);
1128 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1131 if (WARN_ON_ONCE(!memslot))
1134 start = memslot->base_gfn << PAGE_SHIFT;
1135 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1140 kvm_flush_remote_tlbs_memslot(kvm, memslot);
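Lines 1128-1140 belong to the helper that write-protects an entire slot when dirty logging is switched on. Reconstructed around the matches (stage2_wp_range() and the mmu_lock usage are filled in from memory and may vary between versions):

    static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
    {
            struct kvm_memslots *slots = kvm_memslots(kvm);
            struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
            phys_addr_t start, end;

            if (WARN_ON_ONCE(!memslot))
                    return;

            start = memslot->base_gfn << PAGE_SHIFT;
            end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

            write_lock(&kvm->mmu_lock);
            stage2_wp_range(&kvm->arch.mmu, start, end);
            write_unlock(&kvm->mmu_lock);

            /* Stale writable entries may still be cached; flush them (line 1140). */
            kvm_flush_remote_tlbs_memslot(kvm, memslot);
    }
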
1155 struct kvm_memory_slot *memslot;
1161 memslot = id_to_memslot(slots, slot);
1163 start = memslot->base_gfn << PAGE_SHIFT;
1164 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
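Lines 1155-1164 repeat the same start/end computation in a second per-slot helper; in recent kernels this appears to be the eager huge-page splitting path used with dirty logging. A rough sketch (the split call and locking are from memory and version-dependent):

    memslot = id_to_memslot(slots, slot);
    start = memslot->base_gfn << PAGE_SHIFT;
    end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

    /* Split block mappings so dirty logging can track at page granularity. */
    write_lock(&kvm->mmu_lock);
    kvm_mmu_split_huge_pages(kvm, start, end);
    write_unlock(&kvm->mmu_lock);
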
1211 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1219 /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
1223 size = memslot->npages * PAGE_SIZE;
1225 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1227 uaddr_start = memslot->userspace_addr;
1237 * memslot->userspace_addr:
1242 * memslot->base_gfn << PAGE_SHIFT:
1258 * by the memslot. This means we have to prohibit block size mappings
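Lines 1211-1258 all come from fault_supports_stage2_huge_mapping(), which decides whether a block mapping of a given size is safe for a fault in this slot. A reconstruction of the check, with the long diagram comment (lines 1237-1258, of which only fragments match above) abbreviated:

    static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
                                                   unsigned long hva,
                                                   unsigned long map_size)
    {
            gpa_t gpa_start;
            hva_t uaddr_start, uaddr_end;
            size_t size;

            /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
            if (map_size == PAGE_SIZE)
                    return true;

            size = memslot->npages * PAGE_SIZE;
            gpa_start = memslot->base_gfn << PAGE_SHIFT;
            uaddr_start = memslot->userspace_addr;
            uaddr_end = uaddr_start + size;

            /*
             * The HVA and the IPA must share the same offset within a block,
             * otherwise a block entry would map the wrong pages.
             */
            if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                    return false;

            /*
             * The whole block must also be covered by the memslot, which
             * prohibits block mappings at the slot's unaligned head and tail.
             */
            return (hva & ~(map_size - 1)) >= uaddr_start &&
                   (hva & ~(map_size - 1)) + map_size <= uaddr_end;
    }
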
1281 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
1290 * block map is contained within the memslot.
1292 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
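Line 1292 gates transparent-huge-page promotion on the same helper: only if a PMD-sized block is fully contained in the slot does the fault handler upgrade the mapping. A condensed sketch of that call site (the pfn/IPA adjustment is abbreviated from memory):

    if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
            /*
             * The backing page is part of a transparent huge page:
             * align the IPA and the pfn down to PMD_SIZE and install
             * a block mapping instead of a page mapping.
             */
            *ipap &= PMD_MASK;
            pfn &= ~(PTRS_PER_PMD - 1);
            return PMD_SIZE;
    }
    return PAGE_SIZE;
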
1367 * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
1395 struct kvm_memory_slot *memslot, unsigned long hva,
1409 bool logging_active = memslot_is_logging(memslot);
1465 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
1473 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
1507 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
1553 vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
1601 mark_page_dirty_in_slot(kvm, memslot, gfn);
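Lines 1395-1601 trace memslot through user_mem_abort(), the stage-2 page fault handler. A heavily condensed sketch of just the slot-related steps, in the order the matches appear (error paths, MTE handling, and the mmu_notifier retry logic are omitted; the fragments are stitched together from memory):

    bool logging_active = memslot_is_logging(memslot);   /* 1409 */

    /*
     * Dirty logging forces 4K mappings so writes are tracked per page;
     * otherwise a block size is used only if the slot layout permits it
     * (the PUD/PMD checks at 1465 and 1473).
     */
    if (!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize))
            vma_pagesize = PAGE_SIZE;

    /* 1507: resolve the guest frame to a host pfn via the slot's HVA. */
    pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
                               write_fault, &writable, NULL);

    /* 1553: try to promote a page mapping to a PMD block if THP-backed. */
    if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
            vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
                                                       hva, &pfn, &fault_ipa);

    /* ... install the stage-2 mapping ... */

    /* 1601: record the write in the slot's dirty bitmap. */
    if (writable)
            mark_page_dirty_in_slot(kvm, memslot, gfn);
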
1642 struct kvm_memory_slot *memslot;
1701 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1702 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1758 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
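Lines 1642-1758 are from the abort dispatcher, which resolves the faulting IPA to a slot and host virtual address and forwards genuine memory faults to user_mem_abort(). Condensed sketch (the MMIO branch is indicated but not reproduced):

    gfn = fault_ipa >> PAGE_SHIFT;
    memslot = gfn_to_memslot(vcpu->kvm, gfn);               /* 1701 */
    hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); /* 1702 */

    /*
     * If no usable slot backs the IPA (kvm_is_error_hva(hva)), or a write
     * hits a read-only slot, the fault is routed to MMIO emulation instead.
     */

    ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); /* 1758 */
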
1972 * At this point memslot has been committed and there is an
2000 * creating the memslot (a nop). Doing it for deletes makes
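The last two matches are fragments of comments in the memslot commit path. For context, the first (line 1972) sits in kvm_arch_commit_memory_region(); a condensed sketch of that function, reconstructed from memory and simplified:

    void kvm_arch_commit_memory_region(struct kvm *kvm,
                                       struct kvm_memory_slot *old,
                                       const struct kvm_memory_slot *new,
                                       enum kvm_mr_change change)
    {
            /*
             * At this point memslot has been committed and there is an
             * allocated dirty_bitmap[]; dirty pages will be tracked while
             * the memory slot is write protected.
             */
            if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
                    kvm_mmu_wp_memory_region(kvm, new->id);
    }

The second fragment (line 2000) is truncated mid-sentence by the line-oriented search; it appears to belong to a comment later in the same path explaining why a cleanup step (likely the teardown of the eager-split page cache) runs even when a slot is deleted.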