Lines matching refs: page

60 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
64 if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
65 test_bit(PG_dcache_clean, &page->flags)) {
66 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
72 clear_bit(PG_dcache_clean, &page->flags);
76 flush_cache_page(vma, vaddr, page_to_pfn(page));
79 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
83 if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
84 test_bit(PG_dcache_clean, &page->flags)) {
85 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
91 clear_bit(PG_dcache_clean, &page->flags);
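
copy_to_user_page() and copy_from_user_page() share one decision: when the dcache can alias (boot_cpu_data.dcache.n_aliases), the page is mapped somewhere (page_mapcount()), and its dcache state is known clean (PG_dcache_clean), the copy goes through kmap_coherent() so it hits the same cache color as the user mapping; otherwise it goes through the plain kernel address and the clean bit is dropped. A minimal userspace sketch of that decision follows; struct model_page, map_coherent(), and n_aliases are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PG_DCACHE_CLEAN (1u << 0)      /* models PG_dcache_clean */

struct model_page {                    /* stand-in for struct page */
	unsigned flags;                /* models page->flags */
	int mapcount;                  /* models page_mapcount(page) */
	char data[64];                 /* models the page contents */
};

static int n_aliases = 1;              /* models boot_cpu_data.dcache.n_aliases */

/* Models kmap_coherent(): hand back a mapping with the same cache
 * color as the user virtual address, so the copy dirties the lines
 * the user mapping will actually read.  Color selection is elided;
 * the model just returns the backing buffer. */
static char *map_coherent(struct model_page *pg, unsigned long vaddr)
{
	(void)vaddr;
	return pg->data;
}

static void copy_to_user_page_model(struct model_page *pg, unsigned long vaddr,
				    char *dst, const char *src, size_t len)
{
	if (n_aliases && pg->mapcount && (pg->flags & PG_DCACHE_CLEAN)) {
		/* Clean, mapped page: write through the same-color mapping. */
		char *vto = map_coherent(pg, vaddr);
		memcpy(vto, src, len);
	} else {
		/* Write through the kernel mapping; the user alias may now
		 * hold stale lines, so the clean bit has to go. */
		memcpy(dst, src, len);
		if (n_aliases)
			pg->flags &= ~PG_DCACHE_CLEAN;
	}
}

int main(void)
{
	struct model_page pg = { .flags = PG_DCACHE_CLEAN, .mapcount = 1 };

	copy_to_user_page_model(&pg, 0x1000, pg.data, "hi", 3);
	printf("clean=%u data=%s\n", pg.flags & PG_DCACHE_CLEAN, pg.data);
	return 0;
}

The point of the coherent mapping is that on a virtually indexed dcache, writing a page through an arbitrary kernel address can leave the user's cache lines stale; matching colors makes the write land where the user will read it.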
95 void copy_user_highpage(struct page *to, struct page *from,
118 /* Make sure this page is cleared on other CPUs too before using it */
123 void clear_user_highpage(struct page *page, unsigned long vaddr)
125 void *kaddr = kmap_atomic(page);
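
copy_user_highpage() and clear_user_highpage() work through short-lived kmap_atomic() mappings, and the copy path ends with the write barrier behind the comment above (smp_wmb() in the source) so other CPUs observe the new contents before the page is handed out. A compilable sketch of that ordering, with C11 atomic_thread_fence() standing in for smp_wmb() and memcpy() for copy_page(); all model_* names are invented here:

#include <stdatomic.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096            /* models PAGE_SIZE */

struct model_page { char data[MODEL_PAGE_SIZE]; };

/* Models kmap_atomic(): the kernel takes a short-lived mapping of the
 * page; the model just hands back the backing buffer. */
static void *map_atomic(struct model_page *pg)
{
	return pg->data;
}

static void copy_user_highpage_model(struct model_page *to,
				     const struct model_page *from)
{
	void *vto = map_atomic(to);

	memcpy(vto, from->data, MODEL_PAGE_SIZE);  /* models copy_page() */

	/* Models the smp_wmb() at the end of the real function: publish
	 * the page's new contents before any other CPU can use it. */
	atomic_thread_fence(memory_order_release);
}

int main(void)
{
	static struct model_page src, dst;

	memcpy(src.data, "payload", 8);
	copy_user_highpage_model(&dst, &src);
	return 0;
}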
139 struct page *page;
145 page = pfn_to_page(pfn);
147 int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
149 __flush_purge_region(page_address(page), PAGE_SIZE);
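
The pfn_to_page()/test_and_set_bit() pair implements lazy flushing: the first PTE update for a page finds PG_dcache_clean clear, purges the page's cache lines once, and marks it clean; every later update sees the bit already set and skips the purge. A small model of that first-toucher-pays scheme (test_and_set() and purge() are illustrative stand-ins, not the kernel helpers):

#include <stdbool.h>
#include <stdio.h>

#define PG_DCACHE_CLEAN (1u << 0)       /* models PG_dcache_clean */

struct model_page { unsigned flags; };

/* Models test_and_set_bit(): set the bit and report whether it was
 * already set beforehand. */
static bool test_and_set(unsigned *flags, unsigned bit)
{
	bool was_set = (*flags & bit) != 0;

	*flags |= bit;
	return was_set;
}

/* Models __flush_purge_region() as a trace. */
static void purge(struct model_page *pg)
{
	printf("purging dcache lines for page %p\n", (void *)pg);
}

static void update_cache_model(struct model_page *pg)
{
	/* dirty is true only on the clear-to-set transition, so the
	 * purge happens once; later calls see the bit set and skip it. */
	int dirty = !test_and_set(&pg->flags, PG_DCACHE_CLEAN);

	if (dirty)
		purge(pg);
}

int main(void)
{
	struct model_page pg = { 0 };

	update_cache_model(&pg);    /* first touch: purges */
	update_cache_model(&pg);    /* already clean: no-op */
	return 0;
}

Because test_and_set_bit() returns the bit's previous value, the purge cost is paid exactly once per clean-to-dirty cycle rather than on every PTE update.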
153 void __flush_anon_page(struct page *page, unsigned long vmaddr)
155 unsigned long addr = (unsigned long) page_address(page);
158 if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
159 test_bit(PG_dcache_clean, &page->flags)) {
162 kaddr = kmap_coherent(page, vmaddr);
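
__flush_anon_page() reuses the same three-part test: a clean, mapped page gets flushed through a kmap_coherent() mapping carrying the user's cache color, and anything else is purged through the page's linear kernel address from page_address(). A hedged sketch; purge_region() and the model types are stand-ins for __flush_purge_region() and struct page, not the kernel's names:

#include <stdio.h>

#define PG_DCACHE_CLEAN (1u << 0)       /* models PG_dcache_clean */

struct model_page {
	unsigned flags;                 /* models page->flags */
	int mapcount;                   /* models page_mapcount(page) */
	char data[64];                  /* models the page contents */
};

static int n_aliases = 1;               /* models boot_cpu_data.dcache.n_aliases */

/* Models __flush_purge_region() as a trace. */
static void purge_region(void *addr)
{
	printf("purge dcache lines at %p\n", addr);
}

/* Models kmap_coherent(): a mapping colored like the user address. */
static void *map_coherent(struct model_page *pg, unsigned long vmaddr)
{
	(void)vmaddr;
	return pg->data;
}

static void flush_anon_page_model(struct model_page *pg, unsigned long vmaddr)
{
	void *addr = pg->data;          /* models page_address(page) */

	if (n_aliases && pg->mapcount && (pg->flags & PG_DCACHE_CLEAN)) {
		/* Clean, mapped page: flush through a mapping that has
		 * the user's cache color, so the right lines go out. */
		purge_region(map_coherent(pg, vmaddr));
	} else {
		/* Otherwise purge via the page's kernel address. */
		purge_region(addr);
	}
}

int main(void)
{
	struct model_page pg = { .flags = PG_DCACHE_CLEAN, .mapcount = 1 };

	flush_anon_page_model(&pg, 0x2000);
	return 0;
}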
218 void flush_dcache_page(struct page *page)
220 cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
236 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
238 /* Nothing uses the VMA, so just pass the struct page along */
239 cacheop_on_each_cpu(local_flush_icache_page, page, 1);
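
Neither flush_dcache_page() nor flush_icache_page() touches the cache directly: cacheop_on_each_cpu() broadcasts the local_flush_* handler so each CPU flushes its own cache, which matters when per-CPU caches are not coherent with one another. A toy model of that broadcast, with a plain loop standing in for the kernel's cross-CPU call machinery (NR_CPUS_MODEL and the model_* names are invented here):

#include <stdio.h>

#define NR_CPUS_MODEL 4

/* Models local_flush_dcache_page(): the per-CPU leg of the flush. */
static void local_flush_dcache_page_model(void *page)
{
	printf("flush dcache for page %p on this cpu\n", page);
}

/* Models cacheop_on_each_cpu(): in the kernel each CPU runs func(info)
 * on its own caches via a cross-CPU call; the model just loops. */
static void cacheop_on_each_cpu_model(void (*func)(void *), void *info)
{
	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		func(info);
}

int main(void)
{
	int page;

	cacheop_on_each_cpu_model(local_flush_dcache_page_model, &page);
	return 0;
}

The loop only models the contract that every CPU runs the op; the real broadcast is interrupt-driven, which is why the handlers take a single void *info argument.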