Lines matching "page" in arch/s390/mm/gmap.c:
24 #include <asm/page.h>
29 static struct page *gmap_alloc_crst(void)
31 struct page *page;
33 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
34 if (!page)
36 arch_set_page_dat(page, CRST_ALLOC_ORDER);
37 return page;
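
The matches above (lines 29-37) cover all of gmap_alloc_crst() except its failure branch. Filling in the elided lines, with the bare return on allocation failure as an assumption (that line contains no "page" and so could not match), the helper reads:

	static struct page *gmap_alloc_crst(void)
	{
		struct page *page;

		/* CRST (region/segment) tables span multiple pages on s390 */
		page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
		if (!page)
			return NULL;	/* assumed failure path */
		arch_set_page_dat(page, CRST_ALLOC_ORDER);
		return page;
	}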
49 struct page *page;
82 page = gmap_alloc_crst();
83 if (!page)
85 page->index = 0;
86 list_add(&page->lru, &gmap->crst_list);
87 table = page_to_virt(page);
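
Lines 82-87 are the top-level table setup in the gmap allocation path. A hedged sketch of how they fit together; the error label and the crst_table_init() call are assumptions filled in from the usual s390 allocation pattern:

	page = gmap_alloc_crst();
	if (!page)
		goto out_free;			/* assumed error path */
	page->index = 0;			/* root table: no parent entry to point back to */
	list_add(&page->lru, &gmap->crst_list);	/* tracked for later teardown */
	table = page_to_virt(page);
	crst_table_init(table, etype);		/* assumed: fill with empty entries */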
196 struct page *page, *next;
202 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
203 __free_pages(page, CRST_ALLOC_ORDER);
209 /* Free all page tables. */
210 list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
211 page_table_free_pgste(page);
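
Teardown (lines 196-211) mirrors the two allocators: CRST pages are freed as order-CRST_ALLOC_ORDER allocations, while 4 KiB page tables with attached PGSTEs go back through the s390 pgtable allocator. A sketch; the gmap_is_shadow() guard around the second loop is an assumption:

	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);

	if (gmap_is_shadow(gmap)) {		/* assumed guard */
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
	}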
319 struct page *page;
323 page = gmap_alloc_crst();
324 if (!page)
326 new = page_to_virt(page);
330 list_add(&page->lru, &gmap->crst_list);
333 page->index = gaddr;
334 page = NULL;
337 if (page)
338 __free_pages(page, CRST_ALLOC_ORDER);
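
Lines 319-338 show a classic lock-free allocate / locked install / free-on-race shape: the CRST page is allocated outside the lock, installed only if the target entry is still invalid, and released if another CPU won the race (page is set to NULL on success, so the final __free_pages() only runs for the loser). A hedged sketch; the lock name and the invalid-entry test are assumptions:

	page = gmap_alloc_crst();
	if (!page)
		return -ENOMEM;
	new = page_to_virt(page);
	spin_lock(&gmap->guest_table_lock);		/* assumed lock */
	if (*table & _REGION_ENTRY_INVALID) {		/* assumed test */
		list_add(&page->lru, &gmap->crst_list);
		page->index = gaddr;			/* guest address this table maps */
		/* ... link the new table into *table ... */
		page = NULL;				/* installed: don't free below */
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)					/* lost the race */
		__free_pages(page, CRST_ALLOC_ORDER);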
350 struct page *page;
355 page = pmd_pgtable_page((pmd_t *) entry);
356 return page->index + offset;
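
Line 356 is where the page->index bookkeeping pays off: given a pointer to a segment table entry, the guest address is recovered by adding the entry's offset within its table to the table's base guest address stored in page->index. A sketch of the full helper, reconstructed under the assumption that each of the PTRS_PER_PMD entries maps PMD_SIZE of guest memory:

	static unsigned long __gmap_segment_gaddr(unsigned long *entry)
	{
		struct page *page;
		unsigned long offset;

		/* index of this entry within its segment table, scaled to bytes */
		offset = (unsigned long)entry / sizeof(unsigned long);
		offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
		page = pmd_pgtable_page((pmd_t *)entry);
		return page->index + offset;	/* base gaddr + offset within table */
	}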
478 * This function does not establish potentially missing page table entries.
502 * This function does not establish potentially missing page table entries.
516 * gmap_unlink - disconnect a page table from the gmap shadow tables
518 * @table: pointer to the host page table
519 * @vmaddr: vm address associated with the host page table
540 * __gmap_link - set up shadow page tables to connect a host to a guest address
563 /* Create higher level tables in the gmap page table */
590 /* Walk the parent mm page table */
606 /* Link gmap segment table entry location to page table. */
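
The three comments above (lines 563, 590, 606) name the stages of __gmap_link(). The middle stage uses the stock pagetable walkers on the host mm; a minimal sketch of that walk, with the huge-page and hole handling of the real code elided:

	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);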
701 /* Get pointer to the page table entry */
787 * gmap_table_walk - walk the gmap page tables
790 * @level: page table level to stop at
793 * @level=0 : returns a pointer to a page table entry (or NULL)
799 * Returns NULL if the gmap page tables could not be walked to the
858 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
872 /* Walk the gmap page table, lock and get pte pointer */
880 * gmap_pte_op_fixup - force a page in and connect the gmap page table
888 * up or connecting the gmap page table.
904 /* Connect the page tables */
909 * gmap_pte_op_end - release the page table lock
911 * @ptl: pointer to the page table spinlock
945 /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
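
Lines 858-945 belong to the gmap_pte_op_walk / gmap_pte_op_fixup / gmap_pte_op_end trio. Callers use them in a retry loop: walk to the pte under its lock, and if the walk fails, fault the page in, connect the gmap page table, and try again. A hedged sketch of that caller pattern; the -EAGAIN convention and the exact signatures are assumptions based on the fragments:

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = 0;
			/* ... operate on *ptep here ... */
			gmap_pte_op_end(ptl);	/* release the page table lock */
		}
		if (rc != -EAGAIN)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		/* force the page in and connect the gmap page table */
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
		if (rc)
			break;
	}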
1111 * Returns 0 if a gmap mapping exists for each page in the given range,
1115 * This function establishes missing page table entries.
1134 * gmap_read_table - get an unsigned long value from a guest page table using
1135 * absolute addressing, without marking the page referenced.
1304 * gmap_unshadow_page - remove a page from a shadow page table
1315 table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
1323 * __gmap_unshadow_pgt - remove all entries from a shadow page table
1326 * @pgt: pointer to the start of a shadow page table
1341 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
1351 struct page *page;
1363 /* Free page table */
1364 page = phys_to_page(pgt);
1365 list_del(&page->lru);
1366 page_table_free_pgste(page);
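
Lines 1363-1366 are the canonical teardown step, and the same three lines recur at every level below (1392-1394, 1422-1424, 1451-1452, 1480-1482, 1508-1510, 1538-1540, 1570-1572): convert the table origin back to its struct page, unlink it from the gmap's tracking list, and free it. Only the final call differs with the table type:

	/* 4 KiB page tables (pte + pgste) return to the pgtable allocator */
	page = phys_to_page(pgt);
	list_del(&page->lru);
	page_table_free_pgste(page);

	/* CRST tables (segment and region) are multi-page allocations */
	page = phys_to_page(sgt);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);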
1380 struct page *page;
1391 /* Free page table */
1392 page = phys_to_page(pgt);
1393 list_del(&page->lru);
1394 page_table_free_pgste(page);
1409 struct page *page;
1422 page = phys_to_page(sgt);
1423 list_del(&page->lru);
1424 __free_pages(page, CRST_ALLOC_ORDER);
1438 struct page *page;
1450 page = phys_to_page(sgt);
1451 list_del(&page->lru);
1452 __free_pages(page, CRST_ALLOC_ORDER);
1467 struct page *page;
1480 page = phys_to_page(r3t);
1481 list_del(&page->lru);
1482 __free_pages(page, CRST_ALLOC_ORDER);
1497 struct page *page;
1508 page = phys_to_page(r3t);
1509 list_del(&page->lru);
1510 __free_pages(page, CRST_ALLOC_ORDER);
1524 struct page *page;
1538 page = phys_to_page(r2t);
1539 list_del(&page->lru);
1540 __free_pages(page, CRST_ALLOC_ORDER);
1555 struct page *page;
1570 page = phys_to_page(r2t);
1571 list_del(&page->lru);
1572 __free_pages(page, CRST_ALLOC_ORDER);
1577 * gmap_unshadow - remove a shadow page table completely
1662 * The pages of the top level page table referred to by the asce parameter
1770 struct page *page;
1775 page = gmap_alloc_crst();
1776 if (!page)
1778 page->index = r2t & _REGION_ENTRY_ORIGIN;
1780 page->index |= GMAP_SHADOW_FAKE_TABLE;
1781 s_r2t = page_to_phys(page);
1802 list_add(&page->lru, &sg->crst_list);
1810 /* Make r2t read-only in parent gmap page table */
1830 __free_pages(page, CRST_ALLOC_ORDER);
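
Lines 1770-1830 show how a shadow region-2 table is created, and lines 1854-1914 (r3t) and 1938-1998 (sgt) repeat the identical pattern one level down each time: allocate a CRST page, record the parent table's origin in page->index, tag fake tables, install the new table, and free the page on any failure. A hedged sketch; the fake guard and the locking around the install are assumptions:

	page = gmap_alloc_crst();
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;	/* back-pointer to parent table */
	if (fake)					/* assumed guard */
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = page_to_phys(page);
	/* ... install s_r2t under sg->guest_table_lock, then: */
	list_add(&page->lru, &sg->crst_list);
	/* ... on any failure after the allocation: */
	__free_pages(page, CRST_ALLOC_ORDER);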
1854 struct page *page;
1859 page = gmap_alloc_crst();
1860 if (!page)
1862 page->index = r3t & _REGION_ENTRY_ORIGIN;
1864 page->index |= GMAP_SHADOW_FAKE_TABLE;
1865 s_r3t = page_to_phys(page);
1886 list_add(&page->lru, &sg->crst_list);
1894 /* Make r3t read-only in parent gmap page table */
1914 __free_pages(page, CRST_ALLOC_ORDER);
1938 struct page *page;
1943 page = gmap_alloc_crst();
1944 if (!page)
1946 page->index = sgt & _REGION_ENTRY_ORIGIN;
1948 page->index |= GMAP_SHADOW_FAKE_TABLE;
1949 s_sgt = page_to_phys(page);
1970 list_add(&page->lru, &sg->crst_list);
1978 /* Make sgt read-only in parent gmap page table */
1998 __free_pages(page, CRST_ALLOC_ORDER);
2004 * gmap_shadow_pgt_lookup - find a shadow page table
2007 * @pgt: parent gmap address of the page table to get shadowed
2011 * Returns 0 if the shadow page table was found and -EAGAIN if the page
2021 struct page *page;
2028 /* Shadow page tables are full pages (pte+pgste) */
2029 page = pfn_to_page(*table >> PAGE_SHIFT);
2030 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
2032 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
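
Lines 2029-2032 decode the double duty of page->index for shadow page tables: the aligned parent table origin and a flag share one field, which works because GMAP_SHADOW_FAKE_TABLE sits in bits that a table origin can never have set. Annotated:

	/* Shadow page tables are full pages (pte+pgste) */
	page = pfn_to_page(*table >> PAGE_SHIFT);
	*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;	/* parent gmap address */
	*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE); /* was it a fake table? */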
2044 * gmap_shadow_pgt - instantiate a shadow page table
2047 * @pgt: parent gmap address of the page table to get shadowed
2061 struct page *page;
2066 /* Allocate a shadow page table */
2067 page = page_table_alloc_pgste(sg->mm);
2068 if (!page)
2070 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
2072 page->index |= GMAP_SHADOW_FAKE_TABLE;
2073 s_pgt = page_to_phys(page);
2074 /* Install shadow page table */
2091 list_add(&page->lru, &sg->pt_list);
2099 /* Make pgt read-only in parent gmap page table (not the pgste) */
2117 page_table_free_pgste(page);
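
Lines 2066-2117 apply the shadow-creation pattern from gmap_shadow_r2t() at the pte level: the table now comes from page_table_alloc_pgste(), is tracked on sg->pt_list rather than sg->crst_list, and is released with page_table_free_pgste() on failure. Condensed sketch, with the install step elided as in the r2t example above:

	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)					/* assumed guard */
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = page_to_phys(page);
	/* ... install s_pgt under sg->guest_table_lock, then: */
	list_add(&page->lru, &sg->pt_list);
	/* ... on failure: */
	page_table_free_pgste(page);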
2124 * gmap_shadow_page - create a shadow page mapping
2168 /* Get page table pointer */
2228 /* Remove the page table tree from one specific entry */
2259 * @pte: pointer to the page table entry
2262 * This function is assumed to be called with the page table lock held
2556 * mm contracts with s390 that, even if mm were to remove a page table,
2558 * it will never insert a page table containing empty zero pages once
2595 /* Fail if the page tables are 2K */
2649 struct page *page = pmd_page(*pmd);
2664 set_bit(PG_arch_1, &page->flags);
2752 * Call the Destroy secure page UVC on each page in the given array of PFNs.
2753 * Each page needs to have an extra reference, which will be released here.
2770 * __s390_uv_destroy_range - Call the destroy secure page UVC on each page
2778 * secure page UVC on each page. Optionally exit early if a fatal signal is
2804 * s390_unlist_old_asce - Remove the topmost level of page tables from the
2805 * list of page tables of the gmap.
2808 * On s390x, KVM keeps a list of all pages containing the page tables of the
2812 * This function removes the topmost page of the tree (the one pointed to by
2817 * intended. Notice that this function will only remove the page from the
2818 * list; the page will still be used as a top level page table (and ASCE).
2822 struct page *old;
2828 * Sometimes the topmost page might need to be "removed" multiple
2836 * to remove the same page from the list again.
2837 * Therefore it's necessary that the page of the ASCE has valid
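
Putting lines 2822-2837 together: the function only delists the top-level page, and re-initializing page->lru is what makes a second, redundant removal of the same page harmless. A hedged reconstruction; the virt_to_page() conversion and the lock are assumptions:

	void s390_unlist_old_asce(struct gmap *gmap)
	{
		struct page *old;

		old = virt_to_page(gmap->table);	/* assumed conversion */
		spin_lock(&gmap->guest_table_lock);	/* assumed lock */
		list_del(&old->lru);
		/*
		 * Keep the lru pointers valid so that a later, repeated
		 * list_del() on the same page is a safe no-op.
		 */
		INIT_LIST_HEAD(&old->lru);
		spin_unlock(&gmap->guest_table_lock);
	}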
2853 * If the allocation of the new top level page table fails, the ASCE is not
2862 struct page *page;
2871 page = gmap_alloc_crst();
2872 if (!page)
2874 page->index = 0;
2875 table = page_to_virt(page);
2884 list_add(&page->lru, &gmap->crst_list);
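
Lines 2862-2884 allocate the replacement top-level table and put it on the CRST list so that the normal gmap teardown will release it with everything else. A hedged sketch; the memcpy size and the locking are assumptions based on the CRST allocation order:

	page = gmap_alloc_crst();
	if (!page)
		return -ENOMEM;
	page->index = 0;
	table = page_to_virt(page);
	/* copy the old top-level table wholesale (a full CRST allocation) */
	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
	spin_lock(&gmap->guest_table_lock);	/* assumed lock */
	list_add(&page->lru, &gmap->crst_list);
	spin_unlock(&gmap->guest_table_lock);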