Lines Matching refs:start
118 * @start: The intermediate physical base address of the range to unmap
127 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
131 phys_addr_t end = start + size;
135 WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
139 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
141 __unmap_stage2_range(mmu, start, size, true);
190 static int __create_hyp_mappings(unsigned long start, unsigned long size,
196 err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
215 * @from: The virtual kernel start address of the range
227 unsigned long start = kern_hyp_va((unsigned long)from);
233 start = start & PAGE_MASK;
236 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
239 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
295 * @phys_addr: The physical start address which gets mapped
331 * @phys_addr: The physical start address which gets mapped
550 * Called to start logging dirty pages after memory region
562 phys_addr_t start, end;
567 start = memslot->base_gfn << PAGE_SHIFT;
571 stage2_wp_range(&kvm->arch.mmu, start, end);
592 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
595 stage2_wp_range(&kvm->arch.mmu, start, end);
715 * THP doesn't start to split while we are adjusting the
1001 * anything about this (there's no syndrome for a start), so
1065 unsigned long start,
1083 hva_start = max(start, memslot->userspace_addr);
1106 unsigned long start, unsigned long end, unsigned flags)
1111 trace_kvm_unmap_hva_range(start, end);
1112 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
1169 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1173 trace_kvm_age_hva(start, end);
1174 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
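The two hits that compute start and end from base_gfn, __ffs(mask) and __fls(mask) (the dirty-log write-protect path that feeds stage2_wp_range()) reduce to simple mask-to-byte-range arithmetic: the lowest and highest set bits of the dirty bitmap word pick the first and last page relative to base_gfn, giving one contiguous [start, end) range to write-protect. Below is a minimal user-space sketch of that arithmetic only, not the kernel code itself: it assumes 4 KiB pages (PAGE_SHIFT = 12) and a 64-bit unsigned long, substitutes the GCC/Clang builtins __builtin_ctzl()/__builtin_clzl() for the kernel's __ffs()/__fls(), and uses made-up base_gfn/mask values purely for illustration.

	/* Sketch of the mask-to-range math seen in the listing above. */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

	int main(void)
	{
		uint64_t base_gfn = 0x1000;	/* hypothetical memslot-relative base gfn */
		unsigned long mask = 0x38;	/* hypothetical dirty mask: pages 3, 4, 5 */

		/* ~ __ffs(mask): index of the lowest set bit */
		unsigned int first = __builtin_ctzl(mask);
		/* ~ __fls(mask): index of the highest set bit (64-bit long assumed) */
		unsigned int last = 63 - __builtin_clzl(mask);

		/* Same shifts as the two grep hits: one byte range covering all set bits. */
		uint64_t start = (base_gfn + first) << PAGE_SHIFT;
		uint64_t end = (base_gfn + last + 1) << PAGE_SHIFT;

		printf("write-protect range: [%#llx, %#llx)\n",
		       (unsigned long long)start, (unsigned long long)end);
		return 0;
	}

Because the mask covers at most one long's worth of pages, bounding it by its lowest and highest set bits always yields a single contiguous range, which is why a single stage2_wp_range() call suffices in that path.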