Lines matching defs:end
34 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
39 return (boundary - 1 < end - 1) ? boundary : end;
42 static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
46 return __stage2_range_addr_end(addr, end, size);
57 phys_addr_t end,
70 next = stage2_range_addr_end(addr, end);
75 if (resched && next != end)
77 } while (addr = next, addr != end);
82 #define stage2_apply_range_resched(mmu, addr, end, fn) \
83 stage2_apply_range(mmu, addr, end, fn, true)
115 phys_addr_t end)
149 next = __stage2_range_addr_end(addr, end, chunk_size);
153 } while (addr = next, addr != end);
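
The hits above are the stage-2 range-walk idiom: __stage2_range_addr_end() clamps the next chunk boundary to the end of the range, and stage2_apply_range() steps through the range one chunk at a time, optionally rescheduling between chunks (the split path does the same with an explicit chunk_size). Below is a minimal userspace sketch of the same arithmetic; the apply_fn callback and print_chunk helper are hypothetical stand-ins for the pgtable visitors, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Clamp the next chunk boundary to 'end'; 'size' must be a power of two. */
static phys_addr_t range_addr_end(phys_addr_t addr, phys_addr_t end,
				  phys_addr_t size)
{
	phys_addr_t boundary = (addr + size) & ~(size - 1);

	return (boundary - 1 < end - 1) ? boundary : end;
}

/* Hypothetical visitor; the kernel passes pgtable operations here. */
typedef int (*apply_fn)(phys_addr_t addr, phys_addr_t size);

static int apply_range(phys_addr_t addr, phys_addr_t end,
		       phys_addr_t chunk, apply_fn fn)
{
	phys_addr_t next;
	int ret = 0;

	do {
		next = range_addr_end(addr, end, chunk);
		ret = fn(addr, next - addr);
		if (ret)
			break;
		/* A resched-aware caller would cond_resched() when next != end. */
	} while (addr = next, addr != end);

	return ret;
}

static int print_chunk(phys_addr_t addr, phys_addr_t size)
{
	printf("apply [%#llx, %#llx)\n", (unsigned long long)addr,
	       (unsigned long long)(addr + size));
	return 0;
}

int main(void)
{
	/* Walk an unaligned 10 KiB range in 4 KiB-aligned chunks. */
	return apply_range(0x1800, 0x1800 + 10 * 1024, 4096, print_chunk);
}

The (boundary - 1 < end - 1) form keeps the clamp correct even when end is 0, i.e. a range that runs to the very top of the address space: end - 1 then becomes the maximum value, so the aligned boundary is still chosen.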
297 * end up writing old data to disk.
323 phys_addr_t end = start + size;
327 WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
342 stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush);
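
The unmap and flush hits derive the guest-physical range straight from the memslot: the start comes from base_gfn shifted up by PAGE_SHIFT, and the end is npages pages beyond it. A small sketch of that arithmetic, using a stripped-down slot struct whose names are illustrative rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Illustrative stand-in for a memory slot: first guest frame and length. */
struct memslot {
	uint64_t base_gfn;
	uint64_t npages;
};

int main(void)
{
	struct memslot slot = { .base_gfn = 0x80000, .npages = 256 };

	/* Same shape as the hits above: addr from the gfn, end = addr + npages pages. */
	uint64_t addr = slot.base_gfn << PAGE_SHIFT;
	uint64_t end  = addr + PAGE_SIZE * slot.npages;

	printf("unmap/flush IPA range: [%#llx, %#llx), %llu KiB\n",
	       (unsigned long long)addr, (unsigned long long)end,
	       (unsigned long long)((end - addr) >> 10));
	return 0;
}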
518 phys_addr_t start, end, cur;
537 end = PAGE_ALIGN(__pa(to));
538 for (cur = start; cur < end; cur += PAGE_SIZE) {
550 phys_addr_t start, end, cur;
557 end = PAGE_ALIGN(__pa(to));
558 for (cur = start; cur < end; cur += PAGE_SIZE) {
567 * @to: The virtual kernel end address of the range (exclusive)
579 unsigned long end = kern_hyp_va((unsigned long)to);
588 end = PAGE_ALIGN(end);
590 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
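
The hypervisor-mapping hits walk a byte range page by page: the exclusive end is rounded up with PAGE_ALIGN() and the loop advances by PAGE_SIZE. A self-contained sketch of that loop shape, where map_one_page() is a hypothetical per-page hook rather than the kernel's mapping code:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Hypothetical per-page hook; the kernel would create a mapping here. */
static int map_one_page(uintptr_t va)
{
	printf("map page at %#lx\n", (unsigned long)va);
	return 0;
}

/*
 * Walk [from, to) one page at a time: round the start down, round the
 * exclusive end up, then step by PAGE_SIZE, as the loops above do.
 */
static int map_range(uintptr_t from, uintptr_t to)
{
	uintptr_t start = from & PAGE_MASK;
	uintptr_t end = PAGE_ALIGN(to);
	uintptr_t cur;
	int ret;

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		ret = map_one_page(cur);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	/* A range that straddles page boundaries on both sides. */
	return map_range(0x100a00, 0x103080);
}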
1105 * @end: End address of range
1107 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
1109 stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect);
1129 phys_addr_t start, end;
1135 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1138 stage2_wp_range(&kvm->arch.mmu, start, end);
1156 phys_addr_t start, end;
1164 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1167 kvm_mmu_split_huge_pages(kvm, start, end);
1188 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1192 stage2_wp_range(&kvm->arch.mmu, start, end);
1203 kvm_mmu_split_huge_pages(kvm, start, end);
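
In the dirty-log path the write-protected span comes from one bitmap word rather than a whole memslot: the first set bit gives the start offset from base_gfn and the last set bit, plus one, gives the exclusive end, as in the __fls() hit above. A sketch of that mask-to-range conversion using compiler builtins in place of the kernel's __ffs()/__fls(); the values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

/* Userspace stand-ins for the kernel's __ffs()/__fls() on a 64-bit word. */
static unsigned int first_set_bit(uint64_t mask) { return __builtin_ctzll(mask); }
static unsigned int last_set_bit(uint64_t mask)  { return 63 - __builtin_clzll(mask); }

int main(void)
{
	uint64_t base_gfn = 0x80000;	/* gfn corresponding to bit 0 of this word */
	uint64_t mask = 0x00f0;		/* pages 4..7 of the word are dirty */

	/* Bounds of the dirty span, the end being exclusive, as in the hits above. */
	uint64_t start = (base_gfn + first_set_bit(mask)) << PAGE_SHIFT;
	uint64_t end   = (base_gfn + last_set_bit(mask) + 1) << PAGE_SHIFT;

	printf("write-protect [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}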
1233 * PMD/PUD entries, because we'll end up mapping the wrong pages.
1247 * If we create those stage-2 blocks, we'll end up with this incorrect
1259 * for the beginning and end of a non-block aligned and non-block sized
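
These comment fragments belong to the check that refuses PMD/PUD block mappings when a memslot's edges are not block aligned, since a block straddling the slot boundary would end up mapping the wrong pages. A simplified sketch of such an alignment check, assuming 4 KiB pages and a 2 MiB block; the struct and function names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)	/* 2 MiB block with 4 KiB pages */

/* Illustrative memslot: a guest-physical range backed by a host VA range. */
struct memslot {
	uint64_t base_gfn;
	uint64_t npages;
	uint64_t userspace_addr;
};

/*
 * A block mapping is only safe if the host VA and guest PA share the same
 * offset within the block (so one block maps the intended pages) and the
 * block lies entirely inside the memslot; otherwise fall back to pages.
 */
static bool block_mapping_ok(const struct memslot *slot, uint64_t hva, uint64_t block)
{
	uint64_t uaddr_start = slot->userspace_addr;
	uint64_t uaddr_end = uaddr_start + (slot->npages << PAGE_SHIFT);
	uint64_t gpa_start = slot->base_gfn << PAGE_SHIFT;

	if ((uaddr_start & (block - 1)) != (gpa_start & (block - 1)))
		return false;

	return (hva & ~(block - 1)) >= uaddr_start &&
	       (hva & ~(block - 1)) + block <= uaddr_end;
}

int main(void)
{
	struct memslot slot = {
		.base_gfn = 0x80000,			/* GPA 0x80000000 */
		.npages = 1024,				/* 4 MiB */
		.userspace_addr = 0x7f0000200000,	/* 2 MiB aligned host VA */
	};

	printf("block ok: %d\n", block_mapping_ok(&slot, 0x7f0000300000, PMD_SIZE));
	return 0;
}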
1777 (range->end - range->start) << PAGE_SHIFT,
1790 WARN_ON(range->end - range->start != 1);
1819 u64 size = (range->end - range->start) << PAGE_SHIFT;
1831 u64 size = (range->end - range->start) << PAGE_SHIFT;
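
The notifier hits at the bottom work in guest frame numbers with an exclusive end, so the byte size is just the gfn count shifted by PAGE_SHIFT, and the set-spte path insists the range covers exactly one page. A minimal illustration with a stand-in range struct (not the kernel's type):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

/* Stand-in for the notifier's gfn range: start inclusive, end exclusive. */
struct gfn_range {
	uint64_t start;
	uint64_t end;
};

int main(void)
{
	struct gfn_range unmap = { .start = 0x80000, .end = 0x80040 };
	struct gfn_range set_spte = { .start = 0x80010, .end = 0x80011 };

	/* Byte size of the affected span, as in the hits above. */
	uint64_t size = (unmap.end - unmap.start) << PAGE_SHIFT;
	printf("unmap size: %llu KiB\n", (unsigned long long)(size >> 10));

	/* The set-spte path only ever covers a single page. */
	assert(set_spte.end - set_spte.start == 1);
	return 0;
}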