Lines Matching defs:end
1382 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
1385 kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);
1391 ALIGN(end << PAGE_SHIFT, PMD_SIZE))
1392 kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
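The matches at 1382-1392 all come from the dirty-logging path, and the surrounding function looks like kvm_arch_mmu_enable_log_dirty_pt_masked() in arch/x86/kvm/mmu/mmu.c. A hedged reconstruction of how the matched lines fit together, based on recent mainline trees (the guard helper and exact formatting vary by kernel version):

	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
		/* First and last dirty GFNs covered by the 64-bit mask. */
		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);

		if (READ_ONCE(eager_page_split))
			kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);

		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);

		/*
		 * gfn_offset is 64-aligned but base_gfn need not be, so the
		 * masked range can straddle two huge pages; write-protect the
		 * second one as well.
		 */
		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
						       PG_LEVEL_2M);
	}

Note that "end" here is inclusive: __ffs()/__fls() return the first and last set bit of the mask.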
1569 range->start, range->end - 1, &iterator)
2649 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2719 * page is available, while the caller may end up allocating as many as
2973 u64 *start, u64 *end)
2986 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
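Lines 2973 and 2986 belong to direct_pte_prefetch_many(). A sketch of the flow as it appears in 6.x trees before gfn_to_page_many_atomic() was removed; treat the exact helper names (spte_index(), gfn_to_memslot_dirty_bitmap()) as version-dependent:

	static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
					    struct kvm_mmu_page *sp,
					    u64 *start, u64 *end)
	{
		struct page *pages[PTE_PREFETCH_NUM];
		struct kvm_memory_slot *slot;
		unsigned int access = sp->role.access;
		int i, ret;
		gfn_t gfn;

		gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
		slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
		if (!slot)
			return -1;

		/* "end - start" is a PTE count: both pointers index sp->spt. */
		ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
		if (ret <= 0)
			return -1;

		for (i = 0; i < ret; i++, gfn++, start++) {
			mmu_set_spte(vcpu, slot, start, access, gfn,
				     page_to_pfn(pages[i]), NULL);
			put_page(pages[i]);
		}

		return 0;
	}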
3054 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
3404 * - Must be called between walk_shadow_page_lockless_{begin,end}.
4075 * Must be called between walk_shadow_page_lockless_{begin,end}.
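The comments at 2649, 3404 and 4075 all point at the same contract: shadow-page-table walks done without holding mmu_lock must be bracketed by walk_shadow_page_lockless_begin()/walk_shadow_page_lockless_end(), which keep the root pinned and order the walk against vcpu_enter_guest(). A simplified sketch of the usage pattern, modeled on callers such as get_mmio_spte() (variable handling abbreviated):

	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root, leaf;

	walk_shadow_page_lockless_begin(vcpu);

	/* The walk itself must happen entirely inside the begin/end pair. */
	if (is_tdp_mmu_active(vcpu))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	walk_shadow_page_lockless_end(vcpu);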
4559 * The swaps end up rotating the cache like this:
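Line 4559 is from the comment in one of the cached-root lookup helpers (cached_root_find_and_keep_current() in recent trees) describing how the prev_roots cache is searched: the current root C is swapped toward the back as each cached entry is tried, so a hit leaves the old current root in the cache. If memory serves, the loop and the full diagram read roughly:

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		/*
		 * The swaps end up rotating the cache like this:
		 *   C   0 1 2 3   (on entry to the function)
		 *   0   C 1 2 3
		 *   1   C 0 2 3
		 *   2   C 0 1 3
		 *   3   C 0 1 2   (on exit from the loop)
		 */
		swap(mmu->root, mmu->prev_roots[i]);

		if (is_root_usable(&mmu->root, new_pgd, new_role))
			return true;
	}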
6210 gfn_t start, end;
6222 end = min(gfn_end, memslot->base_gfn + memslot->npages);
6223 if (WARN_ON_ONCE(start >= end))
6228 start, end - 1, true, flush);
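Lines 6210-6228 are the per-memslot clamping inside the kvm_zap_gfn_range() path: the requested [gfn_start, gfn_end) range is intersected with each overlapping slot before the rmaps are walked. A condensed, hedged sketch (the zap handler is named __kvm_zap_rmap in some versions, kvm_zap_rmap in others):

	kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
		memslot = iter.slot;
		start = max(gfn_start, memslot->base_gfn);
		end = min(gfn_end, memslot->base_gfn + memslot->npages);
		if (WARN_ON_ONCE(start >= end))
			continue;

		/* end is exclusive; the rmap walk takes an inclusive last GFN. */
		flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
					  PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
					  start, end - 1, true, flush);
	}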
6501 gfn_t start, gfn_t end,
6514 level, level, start, end - 1, true, false);
6520 u64 start, u64 end,
6527 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6529 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, false);
6542 u64 end = start + memslot->npages;
6549 kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
6554 kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
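The last block (6501-6554) is the eager huge-page-split plumbing, and the two entry points differ mainly in locking: kvm_mmu_try_split_huge_pages() runs with mmu_lock already held for write, while the slot-wide kvm_mmu_slot_try_split_huge_pages() takes the write lock only for the shadow-MMU pass and lets the TDP-MMU pass run under the read lock; the trailing false/true argument on the TDP calls at 6529 and 6554 is the TDP MMU's "shared" flag. A hedged reconstruction of the slot-wide variant (the tdp_mmu_enabled guard is spelled differently across versions):

	void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
					       const struct kvm_memory_slot *memslot,
					       int target_level)
	{
		u64 start = memslot->base_gfn;
		u64 end = start + memslot->npages;

		/* Shadow-MMU (rmap) pass requires mmu_lock held for write. */
		if (kvm_memslots_have_rmaps(kvm)) {
			write_lock(&kvm->mmu_lock);
			kvm_shadow_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level);
			write_unlock(&kvm->mmu_lock);
		}

		/* TDP-MMU pass can run concurrently with vCPUs: shared = true. */
		if (tdp_mmu_enabled) {
			read_lock(&kvm->mmu_lock);
			kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
			read_unlock(&kvm->mmu_lock);
		}
	}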