Lines Matching defs:page
32 #include <asm/page.h>
59 * bits determine what page size we use for kernel linear
72 * if these larger page sizes are not supported by the cpu.
131 /* Sanitize what we got from the firmware, by page aligning
179 struct page *mem_map_zero __read_mostly;
197 inline void flush_dcache_page_impl(struct page *page)
205 __flush_dcache_page(page_address(page),
207 page_mapping_file(page) != NULL));
209 if (page_mapping_file(page) != NULL &&
211 __flush_icache_page(__pa(page_address(page)));
220 #define dcache_dirty_cpu(page) \
221 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
223 static inline void set_dcache_dirty(struct page *page, int this_cpu)
240 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
244 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
262 : "r" (cpu), "r" (mask), "r" (&page->flags),
282 struct page *page;
284 page = pfn_to_page(pfn);
285 if (page) {
288 pg_flags = page->flags;
298 flush_dcache_page_impl(page);
300 smp_flush_dcache_page_impl(page, cpu);
302 clear_dcache_dirty_cpu(page, cpu);
449 void flush_dcache_page(struct page *page)
458 * is merely the zero page. The 'bigcore' testcase in GDB
461 if (page == ZERO_PAGE(0))
466 mapping = page_mapping_file(page);
468 int dirty = test_bit(PG_dcache_dirty, &page->flags);
470 int dirty_cpu = dcache_dirty_cpu(page);
474 smp_flush_dcache_page_impl(page, dirty_cpu);
476 set_dcache_dirty(page, this_cpu);
483 flush_dcache_page_impl(page);
880 * physical page address to get the true physical address.
1040 * It is, however, slow to check every single page if it matches a
1894 printk("Allocated %ld bytes for kernel page tables.\n",
1906 void __kernel_map_pages(struct page *page, int numpages, int enable)
1908 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1950 * address, so we can use all that our page tables
1994 * as 4-level page table cannot support more than
2250 /* paging_init() sets up the page tables */
2282 * page->flags usage will work.
2284 * When a page gets marked as dcache-dirty, we store the
2285 * cpu number starting at bit 32 in the page->flags. Also,
2388 /* Set kernel pgd to upper alias so physical page computations
2513 * Set up the zero page, mark it reserved, so that page count
2514 * is not manipulated when freeing the page from user ptes.
2518 prom_printf("paging_init: Cannot alloc zero page.\n");
2537 * and if so the freeing below will free invalid page structs.
2548 unsigned long page;
2550 page = (addr +
2556 free_reserved_page(virt_to_page(page));
2795 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2799 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2838 * cheetah+ page size encodings.
2856 * cheetah+ page size encodings.
2882 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2885 if (page)
2886 pte = (pte_t *) page_address(page);
2893 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2894 if (!page)
2896 if (!pgtable_pte_page_ctor(page)) {
2897 __free_page(page);
2900 return (pte_t *) page_address(page);
2910 struct page *page = virt_to_page(pte);
2912 pgtable_pte_page_dtor(page);
2913 __free_page(page);
3009 /* When changing the page size fields, we
3118 void copy_user_highpage(struct page *to, struct page *from,
3129 /* If this page has ADI enabled, copy over any ADI tags
3153 void copy_highpage(struct page *to, struct page *from)
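The cluster of matches around source lines 220-302 and the comment near line 2285 describe one mechanism worth unpacking: a page's D-cache dirty state is folded into page->flags, with a dirty bit plus the owning CPU number stored starting at bit 32, and the dcache_dirty_cpu() macro recovers that CPU by shifting and masking. The sketch below is an illustrative userspace model of that encoding only, not the kernel's code: the bit positions, the mask width, and struct fake_page are assumptions chosen for the example, and the real set_dcache_dirty()/clear_dcache_dirty_cpu() helpers do the update atomically with inline assembly (the "r" (&page->flags) operands visible in the listing), which this non-atomic model deliberately omits.

/* Illustrative sketch, not kernel code: models packing a dcache-dirty
 * bit and the owning CPU number into a single flags word, as described
 * by the comment near line 2285.  Bit positions and mask width below
 * are assumptions for the example.
 */
#include <stdio.h>

#define PG_DCACHE_DIRTY_BIT   40UL   /* assumed position of the dirty bit  */
#define PG_DCACHE_CPU_SHIFT   32UL   /* cpu number starts at bit 32        */
#define PG_DCACHE_CPU_MASK    0xffUL /* assumed width of the cpu field     */

struct fake_page {
	unsigned long flags;
};

/* Mirror of the dcache_dirty_cpu() extraction shown in the listing. */
static unsigned long dcache_dirty_cpu(const struct fake_page *page)
{
	return (page->flags >> PG_DCACHE_CPU_SHIFT) & PG_DCACHE_CPU_MASK;
}

/* Non-atomic stand-in for set_dcache_dirty(): record the dirty bit and
 * which CPU owns the dirty cache lines in one flags word.
 */
static void set_dcache_dirty(struct fake_page *page, int this_cpu)
{
	page->flags &= ~(PG_DCACHE_CPU_MASK << PG_DCACHE_CPU_SHIFT);
	page->flags |= (1UL << PG_DCACHE_DIRTY_BIT) |
		       ((unsigned long)this_cpu << PG_DCACHE_CPU_SHIFT);
}

/* Non-atomic stand-in for clear_dcache_dirty_cpu(): clear the dirty bit
 * only if @cpu still matches the recorded owner.
 */
static void clear_dcache_dirty_cpu(struct fake_page *page, unsigned long cpu)
{
	if (dcache_dirty_cpu(page) == cpu)
		page->flags &= ~(1UL << PG_DCACHE_DIRTY_BIT);
}

int main(void)
{
	struct fake_page page = { .flags = 0 };

	set_dcache_dirty(&page, 3);
	printf("dirty cpu: %lu\n", dcache_dirty_cpu(&page));  /* prints 3 */
	clear_dcache_dirty_cpu(&page, 3);
	printf("flags:     %#lx\n", page.flags);  /* cpu field kept, dirty bit gone */
	return 0;
}

This is only meant to make the flags layout legible; in the flush_dcache_page() path listed above, the recorded CPU is what lets the kernel send the flush to the one CPU whose D-cache may actually hold dirty lines for the page instead of broadcasting it.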