Lines matching refs:page (each match below is prefixed with its line number in the source file):
60 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
64 struct folio *folio = page_folio(page);
68 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
78 flush_cache_page(vma, vaddr, page_to_pfn(page));
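All of these matches appear to come from the SuperH (sh) cache-management code, where a virtually indexed data cache can hold aliases of the same physical page. The first group, lines 60-78, falls inside copy_to_user_page(). Only the matched lines are quotes from the file; the rest of the sketch below (the full parameter list, the n_aliases/page_mapcount/PG_dcache_clean guard mirroring the one matched at line 87, memcpy(), kunmap_coherent(), clear_bit(), and the VM_EXEC check) is an assumption about how the body plausibly fits together:

/* Hedged sketch: unmatched lines are assumptions, not quotes from the file. */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        struct folio *folio = page_folio(page);

        /* Assumed guard: a mapped, known-clean page can be written through
         * an alias-coherent kernel window. */
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &folio->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &folio->flags);
        }

        /* Assumed: a write into an executable mapping must also be made
         * visible to the instruction cache. */
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

The idea is that a mapped, clean page is written through the alias-coherent window, while an unmapped or already-dirty one is written directly and simply marked as no longer dcache-clean.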
81 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
85 struct folio *folio = page_folio(page);
87 if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
89 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
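Lines 81-89 are the read-side counterpart, copy_from_user_page(). Again only the matched lines come from the file; the PG_dcache_clean half of the condition, memcpy(), kunmap_coherent(), and the fallback branch are assumptions following the same pattern:

/* Hedged sketch: unmatched lines are assumptions, not quotes from the file. */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        struct folio *folio = page_folio(page);

        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &folio->flags)) {
                /* Read through the alias-coherent window so stale user-side
                 * cache lines cannot be missed. */
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
        }
}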
99 void copy_user_highpage(struct page *to, struct page *from,
123 /* Make sure this page is cleared on other CPU's too before using it */
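Lines 99 and 123 only bracket copy_user_highpage(): the signature and the closing comment are matched, and everything in between in the sketch below (the kmap_atomic()/kmap_coherent() selection, copy_page(), the pages_do_alias()/VM_EXEC writeback via __flush_purge_region(), and the smp_wmb() that the matched comment refers to) is a hedged guess at the usual sh shape, not a quote:

/* Hedged sketch: unmatched lines are assumptions, not quotes from the file. */
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        struct folio *src = page_folio(from);
        void *vfrom, *vto;

        vto = kmap_atomic(to);

        if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
            test_bit(PG_dcache_clean, &src->flags)) {
                /* Source may be dirty in a user alias: read it coherently. */
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }

        /* Push the new data past any aliasing or executable mapping. */
        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}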
128 void clear_user_highpage(struct page *page, unsigned long vaddr)
130 void *kaddr = kmap_atomic(page);
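Lines 128-130 open clear_user_highpage(). The clear_page() call, the pages_do_alias() check, __flush_purge_region(), and kunmap_atomic() below are assumed, not matched:

/* Hedged sketch: unmatched lines are assumptions, not quotes from the file. */
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);

        /* If the kernel mapping aliases the user address, purge it so the
         * user view sees the zeroed data. */
        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr);
}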
158 void __flush_anon_page(struct page *page, unsigned long vmaddr)
160 struct folio *folio = page_folio(page);
161 unsigned long addr = (unsigned long) page_address(page);
168 kaddr = kmap_coherent(page, vmaddr);
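Lines 158-168 belong to __flush_anon_page(). Only those four matched lines are from the file; the pages_do_alias() guard, the mapped/PG_dcache_clean test, kunmap_coherent(), and the __flush_purge_region() fallback in this sketch are assumptions:

/* Hedged sketch: unmatched lines are assumptions, not quotes from the file. */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        struct folio *folio = page_folio(page);
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
                    test_bit(PG_dcache_clean, &folio->flags)) {
                        void *kaddr;

                        /* Map and unmap the coherent window; unmapping it
                         * purges the relevant cache lines on the way out. */
                        kaddr = kmap_coherent(page, vmaddr);
                        kunmap_coherent(kaddr);
                } else {
                        __flush_purge_region((void *)addr, PAGE_SIZE);
                }
        }
}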
243 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
247 cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
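The final matches, lines 243-247, are essentially the whole of flush_icache_pages(); closing it out needs only the remaining parameter and the braces (the unsigned int nr parameter name is inferred from the generic flush_icache_pages() prototype, so treat it as an assumption):

/* Hedged sketch: unmatched lines are assumptions, not quotes from the file. */
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
                        unsigned int nr)
{
        /* cacheop_on_each_cpu() runs the local icache flush on every CPU;
         * the folio is the opaque argument and 1 means wait for completion. */
        cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
}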