Lines Matching refs:page
51 * any price. Since page is never written to after initialization we
64 struct page *page;
75 page = virt_to_page((void *)empty_zero_page);
76 split_page(page, order);
77 for (i = 0; i < (1 << order); i++, page++)
78 mark_page_reserved(page);
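The matches at 64-78 are the heart of the zero-page setup described by the comment at 51: a higher-order allocation is split into standalone pages so that each cache-colour variant can be reserved individually. A minimal sketch of how these lines fit together, assuming a setup_zero_pages()-style routine; the allocation, the order choice, and the zero_page_mask computation are assumptions filled in around the matched lines:

static void __init setup_zero_pages(void)
{
	unsigned int order = 3;	/* assumption: 8 colours' worth of pages */
	unsigned int i;
	struct page *page;

	/* One zeroed, physically contiguous order-3 block. */
	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("setup_zero_pages: out of memory");

	/*
	 * split_page() breaks the order-N allocation into 2^N independent
	 * order-0 pages so that each one can be reserved on its own.
	 */
	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}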
83 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
92 BUG_ON(Page_dcache_dirty(page));
99 pte = mk_pte(page, prot);
138 void *kmap_coherent(struct page *page, unsigned long addr)
140 return __kmap_pgprot(page, addr, PAGE_KERNEL);
143 void *kmap_noncoherent(struct page *page, unsigned long addr)
145 return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
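kmap_coherent() and kmap_noncoherent() at 138-145 are thin wrappers that differ only in the pgprot_t handed down: PAGE_KERNEL for a cached mapping, PAGE_KERNEL_NC for the uncached variant, with mk_pte() at 99 building the temporary PTE from page and protection. A hedged usage sketch; zero_span() is a hypothetical caller, but the kmap_coherent()/kunmap_coherent() pairing follows the pattern visible in the copy routines below:

/*
 * Hypothetical helper: write a page through a kernel mapping that has
 * the same cache colour as the user's virtual address, so no dcache
 * alias is created. offset + len must not cross the page boundary.
 */
static void zero_span(struct page *page, unsigned long uaddr, size_t len)
{
	void *kaddr = kmap_coherent(page, uaddr);

	memset(kaddr + (uaddr & ~PAGE_MASK), 0, len);
	kunmap_coherent();	/* release the temporary mapping slot */
}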
170 void copy_user_highpage(struct page *to, struct page *from,
190 /* Make sure this page is cleared on other CPUs too before using it */
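The signature at 170 and the comment at 190 bracket copy_user_highpage(); the barrier that follows the comment in the source is what publishes the copied data to other CPUs. A condensed sketch of the body, assuming the usual kmap_atomic() fallback when a coherent mapping is not needed (the cache-flush details between 170 and 190 are elided here):

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	/* Prefer an alias-free coherent mapping while the page is mapped. */
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}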
195 struct page *page, unsigned long vaddr, void *dst, const void *src,
199 page_mapcount(page) && !Page_dcache_dirty(page)) {
200 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
206 SetPageDcacheDirty(page);
209 flush_cache_page(vma, vaddr, page_to_pfn(page));
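The matches at 195-209 reassemble into copy_to_user_page(): when the page is mapped somewhere and its dcache is clean, write through a coherent alias of the right colour; otherwise write through the plain kernel address and record the dirty dcache for a later flush. The flush_cache_page() at 209 keeps the icache consistent; guarding it with VM_EXEC is an assumption about the elided condition:

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		/* Write through a mapping of the user's cache colour. */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}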
213 struct page *page, unsigned long vaddr, void *dst, const void *src,
217 page_mapcount(page) && !Page_dcache_dirty(page)) {
218 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
224 SetPageDcacheDirty(page);
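copy_from_user_page() at 213-224 mirrors the routine above with the copy direction reversed; under the same assumptions, the sketch differs only in the read path:

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		/* Read through the alias-free coherent mapping. */
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}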
447 struct page *page = pfn_to_page(tmp);
450 SetPageReserved(page);
452 free_highmem_page(page);
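The matches at 447-452 come from the highmem pass of memory init: each highmem pfn is either reserved (when it is not backed by usable memory) or handed to the buddy allocator. A sketch of that loop; using memblock_is_memory() as the predicate, and the pfn bounds, are assumptions:

#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);	 /* hole: never hand it out */
		else
			free_highmem_page(page); /* account it, free to buddy */
	}
#endif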
486 struct page *page = pfn_to_page(pfn);
490 free_reserved_page(page);
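The final matches at 486-490 are the loop that returns reserved init memory to the allocator: each pfn becomes a struct page again, and free_reserved_page() clears PG_reserved, fixes the managed-page counters, and frees the page. A sketch of such a helper; the name free_init_range() and the poisoning step are assumptions:

static void free_init_range(unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);

		/* Poison first so stale references to freed init memory
		 * are detectable. */
		memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
}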