Lines Matching defs:page

172 	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
199 struct page *page = pfn_to_page(pfn);
201 if (!PageTransCompoundMap(page))
204 return is_transparent_hugepage(compound_head(page));
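
The matches at 199-204 all come from one small helper that decides whether a pfn is backed by a mapped transparent hugepage. A minimal sketch of how the three hits fit together; the function name and everything outside those three lines are assumptions:

    /* Sketch only: reconstructed from the matches at 199-204. */
    static bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
    {
            struct page *page = pfn_to_page(pfn);

            /* 201: bail out unless the page is part of a mapped THP. */
            if (!PageTransCompoundMap(page))
                    return false;

            /* 204: evaluate the head page of the compound page. */
            return is_transparent_hugepage(compound_head(page));
    }
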
368 * We want to publish modifications to the page tables before reading
559 * This sequence increase will notify the kvm page fault handler that
560 * the page that is going to be mapped in the spte could have
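
The comments at 368 and 559-560 describe the mmu_notifier sequence protocol: a fault handler samples the sequence count before pinning the page and re-checks it under mmu_lock before installing the spte. A hedged sketch of that consumer-side pattern using the standard kvm_host.h helpers; the surrounding fault-handler structure and the retry label are illustrative, not taken from the matches:

    /* Illustrative fragment, not a complete fault handler. */
    unsigned long mmu_seq;
    kvm_pfn_t pfn;

    mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();                        /* pairs with the publish described at 368 */

    pfn = gfn_to_pfn(kvm, gfn);       /* may sleep while faulting the page in */

    spin_lock(&kvm->mmu_lock);
    if (mmu_notifier_retry(kvm, mmu_seq)) {
            /* An invalidation ran in between (559-560); pfn may be stale. */
            spin_unlock(&kvm->mmu_lock);
            kvm_release_pfn_clean(pfn);
            goto retry;               /* illustrative: refault from the top */
    }
    /* ...install the spte while still holding mmu_lock... */
    spin_unlock(&kvm->mmu_lock);
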
1446 /* Allocate/free page dirty bitmap as needed */
1542 * and reenable dirty page tracking for the corresponding pages.
1551 * 2. Write protect the corresponding page.
1555 * Between 2 and 4, the guest may write to the page using the remaining TLB
1556 * entry. This is not a problem because the page is reported dirty using
1634 * Steps 1-4 below provide general overview of dirty page logging. See
1644 * 2. Write protect the corresponding page.
1663 * and reenable dirty page tracking for the corresponding pages.
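
The two comment blocks at 1542-1556 and 1634-1663 document the protocol behind KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG. From userspace the consumer side looks roughly like the sketch below; the helper name, the slot id, and the 4 KiB page size are assumptions, and the bitmap is sized up to a multiple of 64 bits because the kernel copies it out in long-sized chunks, one bit per guest page:

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Illustrative only: fetch the dirty bitmap for one memslot. */
    static void *get_dirty_log(int vm_fd, __u32 slot, __u64 memory_size)
    {
            size_t pages = memory_size / 4096;          /* assumes 4 KiB guest pages */
            size_t bytes = ((pages + 63) / 64) * 8;     /* round up to a long boundary */
            void *bitmap = calloc(1, bytes);
            struct kvm_dirty_log log = {
                    .slot = slot,
                    .dirty_bitmap = bitmap,
            };

            if (!bitmap || ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                    free(bitmap);
                    return NULL;
            }
            return bitmap;   /* caller frees; one bit per dirtied guest page */
    }
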
1898 struct page *page[1];
1908 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
1909 *pfn = page_to_pfn(page[0]);
1927 struct page *page;
1940 npages = get_user_pages_unlocked(addr, 1, &page, flags);
1946 struct page *wpage;
1950 put_page(page);
1951 page = wpage;
1954 *pfn = page_to_pfn(page);
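
The matches at 1898-1909 and 1927-1954 come from the fast and slow get_user_pages paths behind the pfn lookup. A sketch of how the fast-path hits fit together; the function name and the initial write_fault guard are assumptions:

    /* Sketch reconstructed from the matches at 1898-1909. */
    static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                                bool *writable, kvm_pfn_t *pfn)
    {
            struct page *page[1];

            /* The fast path only tries a no-sleep, writable GUP. */
            if (!(write_fault || writable))
                    return false;

            if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
                    *pfn = page_to_pfn(page[0]);
                    if (writable)
                            *writable = true;
                    return true;
            }

            return false;
    }
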
2043 * Pin guest page in memory and return its pfn.
2047 * host page is not in memory
2048 * @write_fault: whether we should get a writable host page
2049 * @writable: whether to allow mapping a writable host page for !@write_fault
2051 * The function will map a writable host page for these two cases:
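
The block at 2043-2051 is the kernel-doc for the internal pfn lookup; the exported wrappers expose the same write_fault/writable contract. An illustrative caller, assuming the gfn_to_pfn_prot() wrapper and an error-return style that is not part of the matches:

    /* Illustrative fragment from a hypothetical fault path. */
    bool writable = false;
    kvm_pfn_t pfn;

    /* Read fault: accept any mapping, but learn whether it is writable. */
    pfn = gfn_to_pfn_prot(kvm, gfn, /* write_fault = */ false, &writable);
    if (is_error_pfn(pfn))
            return -EFAULT;

    /* If writable came back true, the mapping may also be made writable. */
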
2172 struct page **pages, int nr_pages)
2188 static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
2201 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2243 struct page *page = KVM_UNMAPPED_PAGE;
2267 page = pfn_to_page(pfn);
2269 hva = kmap_atomic(page);
2271 hva = kmap(page);
2283 map->page = page;
2317 if (map->page != KVM_UNMAPPED_PAGE) {
2321 kunmap(map->page);
2339 map->page = NULL;
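
The matches at 2243-2339 are the internals of the kvm_host_map machinery (kmap_atomic vs. kmap, plus the KVM_UNMAPPED_PAGE sentinel). A hedged sketch of how a caller would use the exported kvm_vcpu_map()/kvm_vcpu_unmap() pair; the gpa and the memset payload are made up:

    struct kvm_host_map map;

    if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
            return -EINVAL;

    memset(map.hva, 0, PAGE_SIZE);      /* touch the guest frame via the host mapping */

    kvm_vcpu_unmap(vcpu, &map, true);   /* true: the page was written, mark it dirty */
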
2358 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2368 void kvm_release_page_clean(struct page *page)
2370 WARN_ON(is_error_page(page));
2372 kvm_release_pfn_clean(page_to_pfn(page));
2383 void kvm_release_page_dirty(struct page *page)
2385 WARN_ON(is_error_page(page));
2387 kvm_release_pfn_dirty(page_to_pfn(page));
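
The release helpers at 2368-2387 pair with the page lookups at 2201 and 2358. An illustrative pairing; the error handling is an assumption:

    struct page *page = gfn_to_page(kvm, gfn);

    if (is_error_page(page))
            return -EFAULT;

    /* ...read the page contents... */

    kvm_release_page_clean(page);       /* use kvm_release_page_dirty() after writes */
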
2404 * Per page-flags.h, pages tagged PG_reserved "should in general not be
2646 /* Use the slow path for cross-page reads and writes. */
3148 struct page *page;
3151 page = virt_to_page(vcpu->run);
3154 page = virt_to_page(vcpu->arch.pio_data);
3158 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3162 get_page(page);
3163 vmf->page = page;
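
The matches at 3148-3163 come from the vCPU mmap fault handler, which hands back the kvm_run page, the pio data page, or the coalesced MMIO ring page depending on the faulting offset. A sketch of how those hits fit together; the offset checks, the missing arch/config guards, and the SIGBUS fallback are assumptions:

    static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
    {
            struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
            struct page *page;

            if (vmf->pgoff == 0)
                    page = virt_to_page(vcpu->run);
            else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                    page = virt_to_page(vcpu->arch.pio_data);
            else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                    page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
            else
                    return VM_FAULT_SIGBUS;

            get_page(page);
            vmf->page = page;
            return 0;
    }
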
3228 struct page *page;
3253 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3254 if (!page) {
3258 vcpu->run = page_address(page);
4006 compat_uptr_t dirty_bitmap; /* one bit per page */
4016 compat_uptr_t dirty_bitmap; /* one bit per page */
4156 r += PAGE_SIZE; /* pio data page */
4159 r += PAGE_SIZE; /* coalesced mmio ring page */
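
The size computation at 4156-4159 is what userspace sees through KVM_GET_VCPU_MMAP_SIZE: the vCPU mmap covers the kvm_run page plus, where present, the pio data page and the coalesced MMIO ring page. An illustrative userspace counterpart; the helper name is hypothetical and the caller is expected to have kvm_fd and vcpu_fd open already:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    static struct kvm_run *map_vcpu_run(int kvm_fd, int vcpu_fd)
    {
            long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
            void *p;

            if (mmap_size < 0)
                    return NULL;
            p = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, vcpu_fd, 0);
            return p == MAP_FAILED ? NULL : p;
    }
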