Lines Matching defs:page

30  * pagetable page RW, and will disallow using any writable page as a
68 #include <asm/page.h>
78 #include <xen/page.h>
159 * During early boot all page table pages are pinned, but we do not have struct
165 struct page *page = virt_to_page(ptr);
167 return PagePinned(page);
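The fragments at 165/167 are essentially the whole body of the pinned-page test. A minimal sketch of how it reads in context, assuming the usual Xen PV MMU helper shape (exact guards may differ by kernel version):

/* A page-table page counts as pinned when its struct page carries the
 * Pinned flag; converting the kernel virtual address back to its struct
 * page is all the lookup needed. */
static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}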
230 /* If page is not pinned, we can just update the entry
241 * Associate a virtual page frame with a given physical page frame
411 /* If page is not pinned, we can just update the entry
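The comment repeated at 230, 411 and 494 is the fast path shared by the set_p*d hooks: an unpinned table lives in ordinary writable memory, so a plain store is enough, while a pinned (read-only, hypervisor-validated) table has to go through a hypercall. A hedged sketch of that shape, using the pmd variant as the example; xen_set_pmd_hyper is the batched-hypercall helper assumed from mainline:

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry directly. */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	/* Pinned tables are read-only to the guest; queue an mmu_update
	 * through the hypervisor instead. */
	xen_set_pmd_hyper(ptr, val);
}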
449 struct page *page = virt_to_page(pgd_page);
450 user_ptr = (pgd_t *)page->private;
469 * there's a page structure. This implies:
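Lines 449-450 show where the 64-bit user-mode pgd lives: its address is parked in page->private of the kernel pgd's struct page. A sketch of the lookup, hedged against version differences (USER_LIMIT and the offset handling are taken from mainline):

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)((unsigned long)pgd & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);

		/* Stored here by the pgd allocation hook (see 1383-1391). */
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}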
494 /* If page is not pinned, we can just update the entry
534 void (*func)(struct mm_struct *mm, struct page *,
548 void (*func)(struct mm_struct *mm, struct page *,
569 void (*func)(struct mm_struct *mm, struct page *,
588 * callback function on each page it finds making up the page table,
598 void (*func)(struct mm_struct *mm, struct page *,
636 void (*func)(struct mm_struct *mm, struct page *,
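The signature void (*func)(struct mm_struct *mm, struct page *, enum pt_level) recurs at 534-636 because one walker drives pinning, unpinning and the early-boot marking alike: it invokes the callback on every page it finds making up the page table. A simplified sketch of how the two main users differ only in the callback they pass; multicall batching, the 64-bit user pgd and the exact use of the MMUEXT constants are assumptions from mainline:

static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	/* Make every constituent page read-only, then ask Xen to pin
	 * (validate and lock down) the top-level table. */
	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
	xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
}

static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	/* Reverse order: unpin the top level first, then walk the table
	 * making each page writable again. */
	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
}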
643 /* If we're using split pte locks, then take the page's lock and
645 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
650 ptl = ptlock_ptr(page);
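Lines 643-650 are the split-pte-lock helper: when split pte locks are configured, take the page's own lock, nested inside mm->page_table_lock, so nothing can update the pte page while its protection changes. A sketch (the config symbol is the mainline one):

static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}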
673 static void xen_pin_page(struct mm_struct *mm, struct page *page,
676 unsigned pgfl = TestSetPagePinned(page);
679 void *pt = lowmem_page_address(page);
680 unsigned long pfn = page_to_pfn(page);
696 * page while holding the lock. This means the number
706 ptl = xen_pte_lock(page, mm);
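Lines 673-706 belong to the per-page pin callback: atomically set the Pinned flag and, if it was not already set, remap the page read-only through a batched update_va_mapping. The comment around 696 explains why each pte page is marked RO and pinned while its own lock is held, so the number of locks held at any one time stays bounded. The unpin callback at 796-815 mirrors this in reverse. A simplified, hedged sketch; highmem handling and the deferred-unlock details are dropped, and the multicall helpers are assumptions from mainline:

static void xen_pin_page(struct mm_struct *mm, struct page *page,
			 enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);

	if (!pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl = NULL;

		/* Hold the pte page's lock across the RO transition so a
		 * racing writer cannot see it read-only but not yet pinned. */
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PTE ? UVMF_INVLPG : 0);

		if (ptl) {
			/* Pin the pte page now; drop its lock once this
			 * multicall batch has been issued. */
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}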
763 struct page *page;
767 list_for_each_entry(page, &pgd_list, lru) {
768 if (!PagePinned(page)) {
769 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
770 SetPageSavePinned(page);
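Lines 763-770 are the suspend-time sweep: every pgd on the global pgd_list that is not already pinned gets pinned against init_mm and tagged SavePinned so that resume knows to undo it. A sketch, close to what the matched lines already spell out:

void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			/* Remember we pinned it, so resume can unpin it. */
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}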
777 static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
780 SetPagePinned(page);
785 * that's before we have page structures to store the bits. So do all
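Lines 777-785: the init_mm page table is effectively pinned from the moment it is created, but that happens before struct pages exist to hold the flag, so the book-keeping is replayed later with the same walker. A sketch; the wrapper's name varies across versions (xen_mark_init_mm_pinned in older trees, xen_after_bootmem in newer ones), so take the second function as illustrative:

static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				   enum pt_level level)
{
	SetPagePinned(page);
}

static void __init xen_after_bootmem(void)
{
	/* Catch up on the flags the early boot code could not record. */
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}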
796 static void xen_unpin_page(struct mm_struct *mm, struct page *page,
799 unsigned pgfl = TestClearPagePinned(page);
802 void *pt = lowmem_page_address(page);
803 unsigned long pfn = page_to_pfn(page);
810 * the pte page is unpinned but still RO to prevent
815 ptl = xen_pte_lock(page, mm);
867 struct page *page;
871 list_for_each_entry(page, &pgd_list, lru) {
872 if (PageSavePinned(page)) {
873 BUG_ON(!PagePinned(page));
874 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
875 ClearPageSavePinned(page);
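Lines 867-875 are the resume-time counterpart: anything tagged SavePinned during suspend must still be pinned, gets unpinned against init_mm, and has the tag cleared. A sketch, again close to the matched lines:

void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}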
1018 * Make a page range writeable and free it.
1110 * also free the page tables mapping the initial P->M table.
1167 * the ramdisk). We continue on, erasing PMD entries that point to page
1359 * Since there are no user-page tables at all, we have two variants
1383 struct page *page = virt_to_page(pgd);
1388 BUG_ON(page->private != 0);
1391 page->private = (unsigned long)user_pgd;
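Lines 1383-1391 are the allocation side of the user pgd seen at 449-450: when a kernel pgd is allocated, a companion user-mode pgd is allocated too and its address parked in page->private. A hedged sketch with the vsyscall wiring and the trailing sanity check trimmed:

int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	struct page *page = virt_to_page(pgd);
	pgd_t *user_pgd;
	int ret = -ENOMEM;

	BUG_ON(PagePinned(page));
	BUG_ON(page->private != 0);

	/* Allocate the user-mode pgd and stash it where
	 * xen_get_user_pgd() will look for it. */
	user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	page->private = (unsigned long)user_pgd;

	if (user_pgd != NULL)
		ret = 0;

	return ret;
}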
1416 * doesn't allow RO page table pages to be remapped RW.
1418 * If there is no MFN for this PFN then this page is initially
1424 * this point it is not possible to tell if a page is pinned or not,
1435 * page tables for mapping the p2m list, too, and page tables MUST be
1510 /* This needs to make sure the new pte page is pinned iff it's being
1520 struct page *page = pfn_to_page(pfn);
1523 SetPagePinned(page);
1546 /* This should never happen until we're OK to use struct page */
1549 struct page *page = pfn_to_page(pfn);
1550 bool pinned = PagePinned(page);
1564 ClearPagePinned(page);
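Lines 1510-1523 and 1546-1564 are the allocation and release hooks for page-table pages: a new pte/pmd page handed to an mm whose pgd is already pinned must itself be marked pinned, dropped to read-only and (for pte pages) pinned with the hypervisor; release reverses the steps. A simplified pair of sketches, assuming the mainline __set_pfn_prot()/__pin_pagetable_pfn() helpers and omitting multicall batching and the early case where struct pages are not yet usable:

static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
			     unsigned level)
{
	/* The new page must end up pinned iff it's being attached to an
	 * already-pinned page table. */
	if (xen_page_pinned(mm->pgd)) {
		struct page *page = pfn_to_page(pfn);

		SetPagePinned(page);
		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
		if (level == PT_PTE)
			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
	}
}

static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (level == PT_PTE)
			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
		__set_pfn_prot(pfn, PAGE_KERNEL);
		ClearPagePinned(page);
	}
}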
1614 /* Set the page permissions on identity-mapped pages */
1768 * At this stage there can be no user pgd, and no page structure to
1809 * page tables. Don't rely on big pages being aligned in (guest) physical
1881 * Setup the page tables for addressing the new p2m list.
1989 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2007 /* All local page mappings */
2042 /* Replicate changes to map the vsyscall page into the user
2279 * it requires PAE page directories below 4GB. Therefore any calls to
2399 unsigned int domid, bool no_translate, struct page **pages)
2433 * We record the error for each page that gives an error, but