Lines Matching refs:end
43 phys_addr_t end,
55 next = stage2_pgd_addr_end(kvm, addr, end);
60 if (resched && next != end)
62 } while (addr = next, addr != end);
67 #define stage2_apply_range_resched(kvm, addr, end, fn) \
68 stage2_apply_range(kvm, addr, end, fn, true)
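
The matches at lines 43-68 outline a chunked range walk: find the next chunk boundary, apply a callback to [addr, next), optionally give the CPU back if more chunks remain, and continue until end. Below is a minimal userspace sketch of that shape only; chunk_addr_end()/apply_range() are hypothetical stand-ins for the kernel's stage2_pgd_addr_end()/stage2_apply_range(), CHUNK_SIZE is an arbitrary granule, and sched_yield() stands in for whatever rescheduling the kernel does between chunks (that call is not among the matches).

/*
 * Userspace mirror of the range-walk shape visible in lines 43-68:
 * advance in chunks, call fn() per chunk, yield between chunks while
 * more work remains. Illustrative only, not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <sched.h>

#define CHUNK_SIZE (1UL << 30)   /* stand-in granule, not the kernel's */

typedef uint64_t phys_addr_t;

/* Clamp the next chunk boundary so the walk never passes 'end'. */
static phys_addr_t chunk_addr_end(phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t boundary = (addr + CHUNK_SIZE) & ~(CHUNK_SIZE - 1);

	return boundary < end ? boundary : end;
}

static int apply_range(phys_addr_t addr, phys_addr_t end,
		       int (*fn)(phys_addr_t addr, phys_addr_t size),
		       int resched)
{
	phys_addr_t next;
	int ret = 0;

	do {
		next = chunk_addr_end(addr, end);
		ret = fn(addr, next - addr);
		if (ret)
			break;

		/* Yield only if another chunk is still pending. */
		if (resched && next != end)
			sched_yield();
	} while (addr = next, addr != end);

	return ret;
}

static int print_chunk(phys_addr_t addr, phys_addr_t size)
{
	printf("chunk: 0x%llx + 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)size);
	return 0;
}

int main(void)
{
	return apply_range(0x40000000, 0x140000000ULL, print_chunk, 1);
}
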
105 * end up writing old data to disk.
131 phys_addr_t end = start + size;
135 WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
148 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
150 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
216 * @to: The virtual kernel end address of the range (exclusive)
228 unsigned long end = kern_hyp_va((unsigned long)to);
234 end = PAGE_ALIGN(end);
236 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
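
Lines 216-236 show the per-page mapping loop: the exclusive end address is rounded up with PAGE_ALIGN() and the range is then visited one PAGE_SIZE step at a time. A self-contained sketch of that iteration follows; the local PAGE_SIZE/PAGE_ALIGN definitions and the touch_page() callback are stand-ins, and rounding the start address down is an assumption, since that line is not among the matches.

/*
 * Page-by-page walk in the shape of lines 216-236: round the exclusive
 * end up to a page boundary, then visit each page in [start, end).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static void touch_page(uintptr_t virt_addr)
{
	printf("page at 0x%lx\n", (unsigned long)virt_addr);
}

static void walk_pages(uintptr_t from, uintptr_t to)
{
	uintptr_t virt_addr;
	uintptr_t start = from & PAGE_MASK;   /* align start down (assumed) */
	uintptr_t end = PAGE_ALIGN(to);       /* align exclusive end up */

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE)
		touch_page(virt_addr);
}

int main(void)
{
	walk_pages(0x1080, 0x3010);   /* visits pages 0x1000, 0x2000, 0x3000 */
	return 0;
}
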
537 * @end: End address of range
539 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
542 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
562 phys_addr_t start, end;
568 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
571 stage2_wp_range(&kvm->arch.mmu, start, end);
593 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
595 stage2_wp_range(&kvm->arch.mmu, start, end);
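
Lines 562-595 compute write-protection bounds in two ways: for a whole memslot (base_gfn + npages shifted by PAGE_SHIFT) and for a dirty-bitmap word, where the highest set bit of mask selects the exclusive end gfn. The sketch below reproduces the mask-to-range arithmetic; the start computation from the lowest set bit is assumed by symmetry (it is not among the matches), and GCC builtins replace the kernel's __ffs()/__fls().

/*
 * Turn one dirty-bitmap word into a guest-physical range to
 * write-protect, mirroring the arithmetic around lines 562-595.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;
typedef uint64_t phys_addr_t;

static void wp_range_from_mask(gfn_t base_gfn, unsigned long mask)
{
	if (!mask)
		return;

	/* Lowest and highest set bits bound the dirty gfn span. */
	unsigned int first = __builtin_ctzl(mask);
	unsigned int last = (unsigned int)(8 * sizeof(mask)) - 1 -
			    __builtin_clzl(mask);

	phys_addr_t start = (base_gfn + first) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + last + 1) << PAGE_SHIFT;

	printf("write-protect [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	/* Bits 3..5 dirty => gfns base_gfn+3 .. base_gfn+5. */
	wp_range_from_mask(0x100, 0x38);
	return 0;
}
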
649 * PMD/PUD entries, because we'll end up mapping the wrong pages.
663 * If we create those stage-2 blocks, we'll end up with this incorrect
675 * for the beginning and end of a non-block aligned and non-block sized
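
The comment fragments at lines 649-675 describe why stage-2 block (PMD/PUD) mappings cannot be used when a memslot is not block aligned: host VA and guest PA would then disagree about which pages a block covers. The check below is a hedged reconstruction of the kind of condition those comments imply; the function name and exact conditions are illustrative, not taken from the file.

/*
 * Illustrative check: a block mapping is only safe when the host VA and
 * guest PA share the same offset within a block, and the whole block
 * containing the VA fits inside the memslot's VA window.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool block_mapping_allowed(uint64_t hva, uint64_t gpa,
				  uint64_t slot_hva_start,
				  uint64_t slot_hva_end,
				  uint64_t block_size)
{
	/* hva and gpa must be congruent modulo the block size ... */
	if ((hva & (block_size - 1)) != (gpa & (block_size - 1)))
		return false;

	/* ... and the block containing hva must lie inside the memslot. */
	uint64_t block_start = hva & ~(block_size - 1);

	return block_start >= slot_hva_start &&
	       block_start + block_size <= slot_hva_end;
}

int main(void)
{
	uint64_t two_mb = 2UL << 20;

	/* Slot starts 4KiB into a 2MiB block: block mapping refused. */
	printf("%d\n", block_mapping_allowed(0x400201000, 0x80001000,
					     0x400201000, 0x400601000, two_mb));
	/* Fully aligned and contained: block mapping allowed. */
	printf("%d\n", block_mapping_allowed(0x400400000, 0x80200000,
					     0x400200000, 0x400800000, two_mb));
	return 0;
}
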
1066 unsigned long end,
1084 hva_end = min(end, memslot->userspace_addr +
1106 unsigned long start, unsigned long end, unsigned flags)
1111 trace_kvm_unmap_hva_range(start, end);
1112 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
1135 unsigned long end = hva + PAGE_SIZE;
1148 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
1169 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1173 trace_kvm_age_hva(start, end);
1174 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
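
Lines 1066-1174 point at the common dispatch path: a host-virtual range is clipped against each memslot's userspace window (hva_end = min(end, ...)), the overlap is translated to guest-physical addresses, and a per-range handler such as kvm_unmap_hva_handler or kvm_age_hva_handler is invoked. The standalone sketch below shows that clip-and-translate loop; the slot struct and handler signature are assumptions for illustration, not the kernel's structures.

/*
 * Clip [start, end) against each slot's hva window, translate the
 * overlap to a gpa, and hand it to a callback.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct slot {
	uint64_t userspace_addr;   /* hva of slot start */
	uint64_t npages;
	uint64_t base_gfn;         /* gpa >> PAGE_SHIFT of slot start */
};

typedef int (*hva_handler_t)(uint64_t gpa, uint64_t size, void *data);

static int for_each_overlap(const struct slot *slots, int nr_slots,
			    uint64_t start, uint64_t end,
			    hva_handler_t handler, void *data)
{
	int ret = 0;

	for (int i = 0; i < nr_slots; i++) {
		const struct slot *s = &slots[i];
		uint64_t slot_end = s->userspace_addr +
				    (s->npages << PAGE_SHIFT);

		/* Clamp the request to this slot's hva window. */
		uint64_t hva_start = start > s->userspace_addr ?
				     start : s->userspace_addr;
		uint64_t hva_end = end < slot_end ? end : slot_end;

		if (hva_start >= hva_end)
			continue;   /* no overlap with this slot */

		/* Offset into the slot gives the gpa of the overlap. */
		uint64_t gpa = (s->base_gfn << PAGE_SHIFT) +
			       (hva_start - s->userspace_addr);

		ret |= handler(gpa, hva_end - hva_start, data);
	}

	return ret;
}

static int report(uint64_t gpa, uint64_t size, void *data)
{
	(void)data;
	printf("gpa 0x%llx, size 0x%llx\n",
	       (unsigned long long)gpa, (unsigned long long)size);
	return 0;
}

int main(void)
{
	struct slot slots[] = {
		{ .userspace_addr = 0x7f0000000000, .npages = 16,
		  .base_gfn = 0x100 },
	};

	return for_each_overlap(slots, 1, 0x7f0000002000, 0x7f0000005000,
				report, NULL);
}
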