Lines Matching refs:page

128 void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
140 flush_pfn_alias(page_to_pfn(page), uaddr);
149 flush_icache_alias(page_to_pfn(page), uaddr, len);
159 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
167 __flush_ptrace_access(page, uaddr, kaddr, len, flags);
170 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
175 __flush_ptrace_access(page, uaddr, kaddr, len, flags);
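
The matched lines at 128-175 show flush_ptrace_access() and flush_uprobe_xol_access() as thin wrappers that funnel into __flush_ptrace_access(), which in turn picks between flush_pfn_alias() (D-cache alias) and flush_icache_alias() (I-cache alias) based on a flags word. A minimal sketch of the wrapper pattern; the flag constant and the VM_EXEC test are not in the matched lines, so treat them as assumptions:

        /* Sketch only: FLAG_PA_IS_EXEC and the VM_EXEC check are inferred,
         * not shown in the matched lines above. */
        void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                                 unsigned long uaddr, void *kaddr, unsigned long len)
        {
                unsigned int flags = 0;

                /* Executable mappings also need their I-cache alias flushed. */
                if (vma->vm_flags & VM_EXEC)
                        flags |= FLAG_PA_IS_EXEC;

                __flush_ptrace_access(page, uaddr, kaddr, len, flags);
        }
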
179 * Copy user data from/to a page which is mapped into a different
185 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
193 flush_ptrace_access(vma, page, uaddr, dst, len);
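
The copy_to_user_page() lines at 185-193 implement the contract the comment at 179 introduces: copy user data into a page that is mapped at a different (kernel) address, then flush so the user-visible alias observes the new bytes. A minimal sketch of that pattern, matching the generic recipe in the kernel's cache documentation; the SMP I-cache broadcast some configurations perform first is omitted:

        void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                               unsigned long uaddr, void *dst, const void *src,
                               unsigned long len)
        {
                /* Write through the kernel-side alias of the page ... */
                memcpy(dst, src, len);

                /* ... then flush so the data is visible at the user address
                 * uaddr, including the I-cache when the mapping is executable. */
                flush_ptrace_access(vma, page, uaddr, dst, len);
        }
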
199 void __flush_dcache_page(struct address_space *mapping, struct page *page)
203 * page. This ensures that data in the physical page is mutually
206 if (!PageHighMem(page)) {
207 __cpuc_flush_dcache_area(page_address(page), page_size(page));
211 for (i = 0; i < compound_nr(page); i++) {
212 void *addr = kmap_atomic(page + i);
217 for (i = 0; i < compound_nr(page); i++) {
218 void *addr = kmap_high_get(page + i);
221 kunmap_high(page + i);
228 * If this is a page cache page, and we have an aliasing VIPT cache,
230 * userspace colour, which is congruent with page->index.
233 flush_pfn_alias(page_to_pfn(page),
234 page->index << PAGE_SHIFT);
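
The __flush_dcache_page() lines at 199-234 cover three cases: a lowmem page flushed directly through its permanent kernel mapping, a highmem page flushed one subpage at a time through a temporary mapping, and, on an aliasing VIPT cache, an additional flush at the userspace colour congruent with page->index. A simplified sketch of the body; the listing shows both kmap_atomic() and kmap_high_get() highmem variants, only the former is kept here, and the cache_is_vipt_aliasing() guard is assumed from context:

        if (!PageHighMem(page)) {
                /* Lowmem: the page has a permanent kernel mapping. */
                __cpuc_flush_dcache_area(page_address(page), page_size(page));
        } else {
                unsigned long i;

                for (i = 0; i < compound_nr(page); i++) {
                        /* Highmem: flush each subpage through a temporary
                         * kernel mapping. */
                        void *addr = kmap_atomic(page + i);

                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_atomic(addr);
                }
        }

        /* Aliasing VIPT cache: also flush at the user-space colour derived
         * from page->index (lines 228-234). */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
                                page->index << PAGE_SHIFT);
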
237 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
244 * There are possible user space mappings of this page:
246 * data in the current VM view associated with this page.
247 * - aliasing VIPT: we only need to find one mapping of this page.
249 pgoff = page->index;
263 flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
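
The __flush_dcache_aliases() lines at 237-263 handle the case the comment describes: on a VIVT cache every user mapping of the page in the current VM view must be flushed, while an aliasing VIPT cache only needs one congruent mapping. The matched lines give the pgoff lookup and the flush_cache_page() call; the interval-tree walk over mapping->i_mmap, the lock, and the current-mm filter are assumed from context:

        struct mm_struct *mm = current->active_mm;      /* assumed */
        struct vm_area_struct *mpnt;
        pgoff_t pgoff = page->index;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                /* Only mappings in the current task's mm are interesting. */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;

                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
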
272 struct page *page;
282 page = pfn_to_page(pfn);
284 mapping = page_mapping_file(page);
288 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
289 __flush_dcache_page(mapping, page);
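
Lines 272-289 (the enclosing function is not named in this listing) show the lazy-flush handshake used when a page is about to gain a mapping: resolve the struct page from its PFN, look up its file mapping, and let test_and_set_bit() on PG_dcache_clean decide whether a flush is still owed. A sketch of that idiom:

        struct page *page = pfn_to_page(pfn);
        struct address_space *mapping = page_mapping_file(page);

        /* PG_dcache_clean acts as an "already flushed" latch: whoever sets
         * it first performs the flush, later callers skip it. */
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
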
298 * of this page.
307 * If the page only exists in the page cache and there are no user
315 void flush_dcache_page(struct page *page)
320 * The zero page is never written to, so never has any dirty
323 if (page == ZERO_PAGE(0))
327 if (test_bit(PG_dcache_clean, &page->flags))
328 clear_bit(PG_dcache_clean, &page->flags);
332 mapping = page_mapping_file(page);
335 mapping && !page_mapcount(page))
336 clear_bit(PG_dcache_clean, &page->flags);
338 __flush_dcache_page(mapping, page);
340 __flush_dcache_aliases(mapping, page);
343 set_bit(PG_dcache_clean, &page->flags);
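
Taken together, the flush_dcache_page() lines at 315-343 give the full decision: skip the zero page, take a fast path that only toggles PG_dcache_clean, defer the flush for page-cache pages with no user mappings, and otherwise flush the kernel mapping plus user aliases before marking the page clean. A hedged reconstruction of that flow; the cache-type predicates are assumed from context (the listing omits those lines) and the real function distinguishes more cache models:

        void flush_dcache_page(struct page *page)
        {
                struct address_space *mapping;

                /* The zero page is never written to, so it never holds
                 * dirty cache lines. */
                if (page == ZERO_PAGE(0))
                        return;

                /* Fast path (lines 327-328): a non-aliasing cache only needs
                 * the "needs sync" bit cleared; the guard is assumed. */
                if (cache_is_vipt_nonaliasing()) {
                        if (test_bit(PG_dcache_clean, &page->flags))
                                clear_bit(PG_dcache_clean, &page->flags);
                        return;
                }

                mapping = page_mapping_file(page);

                if (mapping && !page_mapcount(page)) {
                        /* Page-cache page with no user mappings: defer. */
                        clear_bit(PG_dcache_clean, &page->flags);
                } else {
                        /* Flush now, including any aliasing user mappings. */
                        __flush_dcache_page(mapping, page);
                        if (mapping)
                                __flush_dcache_aliases(mapping, page);
                        set_bit(PG_dcache_clean, &page->flags);
                }
        }
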
349 * Ensure cache coherency for the kernel mapping of this page. We can
350 * assume that the page is pinned via kmap.
352 * If the page only exists in the page cache and there are no user
353 * space mappings, this is a no-op since the page was already marked
357 void flush_kernel_dcache_page(struct page *page)
362 mapping = page_mapping_file(page);
367 addr = page_address(page);
369 * kmap_atomic() doesn't set the page virtual
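
The flush_kernel_dcache_page() lines at 357-369 make the kernel-side mapping coherent after the kernel has written a page that may also be visible to user space; if the page lives only in the page cache with no user-space mappings, it was already marked clean and nothing needs doing. A sketch under those assumptions; the VIVT/aliasing-VIPT guard in the full source is omitted, and as the comment at 369 notes, page_address() can be NULL for a page mapped only via kmap_atomic():

        void flush_kernel_dcache_page(struct page *page)
        {
                struct address_space *mapping = page_mapping_file(page);
                void *addr;

                /* Page-cache page with no user-space mappings: already
                 * handled lazily via PG_dcache_clean, nothing to flush. */
                if (mapping && !mapping_mapped(mapping))
                        return;

                addr = page_address(page);
                /* kmap_atomic() doesn't record the page's virtual address,
                 * so addr may be NULL for a transiently mapped highmem page. */
                if (addr)
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
        }
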
382 * Flush an anonymous page so that users of get_user_pages()
387 * memcpy() to/from page
388 * if written to page, flush_dcache_page()
390 void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
401 pfn = page_to_pfn(page);
415 * in this mapping of the page. FIXME: this is overkill
418 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
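
The final block, __flush_anon_page() at 382-418, backs the sequence the comment fragments describe for get_user_pages() users: pin the page, memcpy() to or from it through the kernel mapping, and call flush_dcache_page() afterwards if it was written. The function flushes the user alias of the page at vmaddr and then the kernel mapping; per the FIXME at 415, the kernel-side flush is stronger than strictly needed. A reduced sketch (the full source also special-cases VIVT and non-aliasing VIPT caches, which the listing does not show):

        void __flush_anon_page(struct vm_area_struct *vma, struct page *page,
                               unsigned long vmaddr)
        {
                unsigned long pfn = page_to_pfn(page);

                /* Write back and invalidate the user-space alias of the page. */
                flush_pfn_alias(pfn, vmaddr);

                /* Invalidate the kernel mapping so a following memcpy()
                 * through page_address() sees coherent data.  As the FIXME
                 * notes, this is overkill: a write-back and invalidate is
                 * issued where an invalidate would do. */
                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
        }
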