Lines matching refs: page
3 * This kernel test validates architecture page table helpers and
43 * On the s390 platform, the lower 4 bits are used to identify the given page table
45 * pxx_clear() because of how dynamic page table folding works on s390. So
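A minimal sketch of how a test can honor this constraint when ORing random bits into an entry; the mask names below are illustrative (the file defines its own arch skip masks), while GENMASK() and BITS_PER_LONG are the standard kernel helpers:

/* Sketch: keep the s390 type bits (bits 3:0) out of the random OR value. */
#define S390_SKIP_MASK_SKETCH	GENMASK(3, 0)
#define RANDOM_ORVALUE_SKETCH \
	(GENMASK(BITS_PER_LONG - 1, 0) & ~S390_SKIP_MASK_SKETCH)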
101 * This test needs to be executed after the given page table entry
122 struct page *page;
131 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
132 * when it's released, and the page allocation check will fail when
133 * the page is allocated again. For architectures other than ARM64,
136 page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
137 if (!page)
146 flush_dcache_page(page);
158 flush_dcache_page(page);
171 flush_dcache_page(page);
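All three flush_dcache_page() calls above follow the same set-then-flush pattern. A minimal sketch, assuming a pgtable_debug_args with the mm, vaddr, ptep, and page_prot fields used elsewhere in this file:

	/* Set the entry, then flush so ARM64 drops PG_arch_1 before free. */
	pte = mk_pte(page, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);

The PMD and PUD variants below repeat the same shape with set_pmd_at() and set_pud_at().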
193 * This test needs to be executed after the given page table entry
212 * A huge page does not point to the next level page table
220 struct page *page;
227 page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
228 if (!page)
233 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
234 * when it's released, and the page allocation check will fail when
235 * the page is allocated again. For architectures other than ARM64,
246 flush_dcache_page(page);
258 flush_dcache_page(page);
271 flush_dcache_page(page);
312 * This test needs to be executed after the given page table entry
334 * A huge page does not point to the next level page table
342 struct page *page;
349 page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
350 if (!page)
355 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
356 * when it's released, and the page allocation check will fail when
357 * the page is allocated again. For architectures other than ARM64,
372 flush_dcache_page(page);
387 flush_dcache_page(page);
404 flush_dcache_page(page);
529 * This entry points to the next level page table page.
566 * This entry points to the next level page table page.
600 * This entry points to the next level page table page.
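These populate tests install an entry that references a lower-level table and assert it is well formed. A sketch of the PMD case, assuming args carries the mm, the pmdp, and a PTE page (start_ptep) saved at init time:

	/* A populated PMD references a PTE page, so it must not be bad. */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));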
618 struct page *page;
621 page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
622 if (!page)
627 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
628 * when it's released, and the page allocation check will fail when
629 * the page is allocated again. For architectures other than ARM64,
640 flush_dcache_page(page);
665 * This entry points to the next level page table page.
892 struct page *page;
899 * swap_migration_tests() requires a dedicated page as it needs to
901 * page that actually maps kernel text ('start_kernel') can be really
902 * problematic. Let's use the allocated page explicitly for this
905 page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
906 if (!page)
912 * make_[readable|writable]_migration_entry() expects the given page to
915 __SetPageLocked(page);
916 swp = make_writable_migration_entry(page_to_pfn(page));
924 swp = make_readable_migration_entry(page_to_pfn(page));
927 __ClearPageLocked(page);
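Read together, this fragment round-trips a locked page through the migration swap entry helpers. A sketch with the assertions filled in; the exact set of checks is illustrative:

	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);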
933 struct page *page;
938 * Accessing the page associated with the pfn is safe here,
941 page = pfn_to_page(args->fixed_pmd_pfn);
942 pte = mk_huge_pte(page, args->page_prot);
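The hugetlb test then probes the huge_pte_*() accessors against this entry. A sketch of the kind of invariants checked; the exact assertion set is illustrative:

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));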
970 * optimization for transparent huge pages. pmd_trans_huge() must
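A sketch of that invariant, assuming CONFIG_TRANSPARENT_HUGEPAGE and the fixed PMD-aligned pfn used elsewhere in this file:

	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot));
	pmd = pmd_mkinvalid(pmd);
	WARN_ON(!pmd_present(pmd));	/* still present after invalidation */
	WARN_ON(!pmd_trans_huge(pmd));	/* and still recognizably huge */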
1029 struct page *page = NULL;
1031 /* Free (huge) page */
1039 page = pfn_to_page(args->pud_pfn);
1040 __free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
1054 page = pfn_to_page(args->pmd_pfn);
1055 __free_pages(page, HPAGE_PMD_ORDER);
1063 page = pfn_to_page(args->pte_pfn);
1064 __free_page(page);
1069 /* Free page table entries */
1096 static struct page * __init
1099 struct page *page = NULL;
1103 page = alloc_contig_pages((1 << order), GFP_KERNEL,
1105 if (page) {
1107 return page;
1113 page = alloc_pages(GFP_KERNEL, order);
1115 return page;
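Pieced together: for orders the buddy allocator cannot satisfy, the helper first tries a contiguous movable allocation, then falls back to alloc_pages(). A reconstruction sketch, assuming CONFIG_CONTIG_ALLOC gates the first path and that args tracks contiguous allocations (the is_contiguous_page field name is an assumption here) so teardown can free them with free_contig_range():

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages(1UL << order, GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;	/* assumed field */
			return page;
		}
	}
#endif
	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;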
1196 struct page *page = NULL;
1203 * will help create page table entries with PROT_NONE permission as
1236 * Allocate page table entries. They will be modified in the tests.
1237 * Let's save the page table entries so that they can be released
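A sketch of that walk, assuming the standard allocation helpers at each level; every pointer is stashed in args so it can be freed when the tests complete:

	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);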
1285 page = debug_vm_pgtable_alloc_huge_page(args,
1287 if (page) {
1288 args->pud_pfn = page_to_pfn(page);
1297 page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
1298 if (page) {
1299 args->pmd_pfn = page_to_pfn(page);
1305 page = alloc_page(GFP_KERNEL);
1306 if (page)
1307 args->pte_pfn = page_to_pfn(page);
1322 pr_info("Validating architecture page table helpers\n");
1329 * the basic page table transformation validations just hold
1331 * given page table entry.
1348 * involve creating page table entries from the protection
1386 * proper page table lock.
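A sketch of that locking, assuming the pxx_*_tests() naming convention this file uses; the basic tests above operate on local values and need no lock:

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);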