Lines Matching defs:page

33 #include <asm/page.h>
37 * The kernel provides one architecture bit PG_arch_1 in the page flags that
44 * are coherent. The kernel clears this bit whenever a page is added to the
45 * page cache. At that time, the caches might not be in sync. We, therefore,
52 * page.
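
    These comment fragments match the Xtensa cache-management code (apparently
    arch/xtensa/mm/cache.c). Judging from the fragments, PG_arch_1 does double
    duty: for I-cache/D-cache coherency a set bit means "clean", while in the
    D-cache-aliasing case it marks a dirty page. Below is a minimal user-space
    sketch of the clean-flag half of that protocol; struct toy_page and the
    helper names are invented for illustration and are not kernel API.

        #include <stdbool.h>
        #include <stdio.h>

        /* Toy model of the PG_arch_1 "clean" protocol described above;
         * nothing here is kernel code. */
        struct toy_page {
                bool arch_1;            /* stands in for PG_arch_1 */
        };

        /* Adding a page to the page cache clears the bit: the caches
         * might not be in sync at that point. */
        static void toy_add_to_page_cache(struct toy_page *p)
        {
                p->arch_1 = false;
        }

        /* Once the caches have been synchronized for the page, mark it
         * clean so the work is not repeated. */
        static void toy_mark_coherent(struct toy_page *p)
        {
                p->arch_1 = true;
        }

        int main(void)
        {
                struct toy_page p;

                toy_add_to_page_cache(&p);
                if (!p.arch_1)          /* possibly stale: sync first */
                        toy_mark_coherent(&p);
                printf("coherent: %d\n", p.arch_1);
                return 0;
        }
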
59 static inline void kmap_invalidate_coherent(struct page *page,
62 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
65 if (!PageHighMem(page)) {
66 kvaddr = (unsigned long)page_to_virt(page);
71 (page_to_phys(page) & DCACHE_ALIAS_MASK);
75 page_to_phys(page));
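
    kmap_invalidate_coherent() only acts when the kernel's mapping of the page
    and the given user vaddr fall on different cache colors, which is what the
    DCACHE_ALIAS_EQ() test above decides. A stand-alone sketch of that color
    test follows; the cache geometry (4 KiB pages, a 16 KiB way, hence four
    colors) is an assumption for the demo, and the real constants live in
    <asm/page.h>.

        #include <stdio.h>

        /* User-space model of the D-cache alias check; geometry assumed. */
        #define PAGE_SHIFT              12
        #define PAGE_SIZE               (1UL << PAGE_SHIFT)
        #define PAGE_MASK               (~(PAGE_SIZE - 1))
        #define DCACHE_WAY_SIZE         (4 * PAGE_SIZE)
        #define DCACHE_ALIAS_MASK       (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
        #define DCACHE_ALIAS_EQ(a, b)   ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)

        int main(void)
        {
                unsigned long phys = 0x1000;    /* physical page, color 1 */

                /* Same color: both mappings hit the same cache lines. */
                printf("0x5000 same color: %d\n",
                       (int)DCACHE_ALIAS_EQ(phys, 0x5000UL));   /* 1 */
                /* Different color: the page can sit in the cache twice. */
                printf("0x6000 same color: %d\n",
                       (int)DCACHE_ALIAS_EQ(phys, 0x6000UL));   /* 0 */
                return 0;
        }
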
81 static inline void *coherent_kvaddr(struct page *page, unsigned long base,
84 if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
85 *paddr = page_to_phys(page);
89 return page_to_virt(page);
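
    Per the matched lines, coherent_kvaddr() returns the page's normal kernel
    address when that is safe, and otherwise a temporary address at base whose
    low bits reproduce the user mapping's color. A user-space model of that
    address arithmetic; the TLBTEMP_BASE_1 value here is illustrative only.

        #include <stdio.h>

        #define PAGE_SHIFT              12
        #define PAGE_SIZE               (1UL << PAGE_SHIFT)
        #define PAGE_MASK               (~(PAGE_SIZE - 1))
        #define DCACHE_WAY_SIZE         (4 * PAGE_SIZE) /* assumed geometry */
        #define DCACHE_ALIAS_MASK       (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
        #define TLBTEMP_BASE_1          0xc8000000UL    /* illustrative */

        /* Model of the arithmetic in coherent_kvaddr(): place the temporary
         * kernel mapping at base plus the user address's color bits, so both
         * mappings land on the same cache lines. */
        static unsigned long toy_coherent_kvaddr(unsigned long base,
                                                 unsigned long vaddr)
        {
                return base + (vaddr & DCACHE_ALIAS_MASK);
        }

        int main(void)
        {
                unsigned long uvaddr = 0x4007a000UL;
                unsigned long kvaddr = toy_coherent_kvaddr(TLBTEMP_BASE_1,
                                                           uvaddr);

                printf("temp kvaddr 0x%lx shares color with 0x%lx: %d\n",
                       kvaddr, uvaddr,
                       (int)(((kvaddr ^ uvaddr) & DCACHE_ALIAS_MASK) == 0));
                return 0;
        }
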
93 void clear_user_highpage(struct page *page, unsigned long vaddr)
96 void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
99 kmap_invalidate_coherent(page, vaddr);
100 set_bit(PG_arch_1, &page->flags);
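
    clear_user_highpage() strings the helpers above together: invalidate any
    stale alias lines, mark the page dirty via PG_arch_1, then clear it through
    the color-matched mapping. A toy sketch of that ordering; the helper names
    are invented stand-ins, not kernel API.

        #include <stdbool.h>
        #include <stdio.h>

        struct toy_page { bool arch_1; };

        static void toy_invalidate_other_colors(void)
        {
                /* stands in for kmap_invalidate_coherent() */
                printf("1: drop stale lines at other cache colors\n");
        }

        static void toy_clear_via_alias(void)
        {
                /* stands in for clear_page_alias(kvaddr, paddr) */
                printf("2: zero the page through the matched kvaddr\n");
        }

        static void toy_clear_user_highpage(struct toy_page *p)
        {
                toy_invalidate_other_colors();
                p->arch_1 = true;       /* set_bit(PG_arch_1, ...): dirty */
                toy_clear_via_alias();
        }

        int main(void)
        {
                struct toy_page p = { .arch_1 = false };

                toy_clear_user_highpage(&p);
                printf("PG_arch_1 set: %d\n", p.arch_1);
                return 0;
        }
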
106 void copy_user_highpage(struct page *dst, struct page *src,
124 * Any time the kernel writes to a user page cache page, or it is about to
125 * read from a page cache page, this routine is called.
129 void flush_dcache_page(struct page *page)
131 struct address_space *mapping = page_mapping_file(page);
134 * If we have a mapping but the page is not mapped to user-space
135 * yet, we simply mark this page dirty and defer flushing the
140 if (!test_bit(PG_arch_1, &page->flags))
141 set_bit(PG_arch_1, &page->flags);
146 unsigned long phys = page_to_phys(page);
147 unsigned long temp = page->index << PAGE_SHIFT;
152 * Flush the page in kernel space and user space.
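
    The matched body shows the two paths of flush_dcache_page(): if the page
    has a mapping but is not mapped to user space yet, the flush is deferred
    by setting PG_arch_1; otherwise both the kernel alias and the user alias
    (whose color comes from page->index << PAGE_SHIFT) are flushed right away.
    A toy decision model with invented names:

        #include <stdbool.h>
        #include <stdio.h>

        #define PAGE_SHIFT 12

        struct toy_page {
                bool arch_1;            /* PG_arch_1: flush still pending */
                unsigned long phys;     /* page_to_phys(page) */
                unsigned long index;    /* page->index */
        };

        static void toy_flush_dcache_page(struct toy_page *p,
                                          bool has_mapping,
                                          bool mapped_to_user)
        {
                if (has_mapping && !mapped_to_user) {
                        /* Not user-visible yet: mark it and let the
                         * update_mmu_cache() path flush later. */
                        p->arch_1 = true;
                        return;
                }

                /* Otherwise flush the kernel alias now, plus the user
                 * alias derived from the file offset. */
                printf("flush phys 0x%lx and user color of offset 0x%lx\n",
                       p->phys, p->index << PAGE_SHIFT);
        }

        int main(void)
        {
                struct toy_page p = { .phys = 0x2000, .index = 3 };

                toy_flush_dcache_page(&p, true, false); /* defers */
                printf("deferred: %d\n", p.arch_1);
                toy_flush_dcache_page(&p, true, true);  /* flushes */
                return 0;
        }
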
175 /* There shouldn't be an entry in the cache for this page anymore. */
192 * Remove any entry in the cache for this page.
219 struct page *page;
224 page = pfn_to_page(pfn);
232 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
233 unsigned long phys = page_to_phys(page);
244 clear_bit(PG_arch_1, &page->flags);
247 if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
249 unsigned long paddr = (unsigned long)kmap_atomic(page);
252 set_bit(PG_arch_1, &page->flags);
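
    These fragments appear to cover two build configurations of the same
    update_mmu_cache()-style hook: with D-cache aliasing, a set PG_arch_1 means
    a deferred flush is due, and the bit is cleared once the flush is done;
    without aliasing, a clear bit on an executable mapping triggers an
    I-cache/D-cache sync and the bit is then set. A toy model of both branches
    (assumed to be selected by the cache geometry in the real file):

        #include <stdbool.h>
        #include <stdio.h>

        struct toy_page {
                bool reserved;
                bool arch_1;
        };

        /* Aliasing config: PG_arch_1 set means "dirty, flush deferred". */
        static void toy_update_mmu_cache_aliasing(struct toy_page *p)
        {
                if (!p->reserved && p->arch_1) {
                        printf("flush kernel and user alias, clear bit\n");
                        p->arch_1 = false;
                }
        }

        /* Coherent-D$ config: PG_arch_1 set means "I$ and D$ in sync". */
        static void toy_update_mmu_cache_exec(struct toy_page *p, bool vm_exec)
        {
                if (!p->reserved && !p->arch_1 && vm_exec) {
                        printf("flush D$, invalidate I$, set bit\n");
                        p->arch_1 = true;
                }
        }

        int main(void)
        {
                struct toy_page p = { .reserved = false, .arch_1 = true };

                toy_update_mmu_cache_aliasing(&p);      /* dirty: flushes */
                toy_update_mmu_cache_exec(&p, true);    /* exec: syncs I$ */
                return 0;
        }
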
260 * flush_dcache_page() on the page.
265 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
269 unsigned long phys = page_to_phys(page);
272 /* Flush and invalidate user page if aliased. */
286 * Flush and invalidate kernel page if aliased and synchronize
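
    copy_to_user_page() brackets the copy with flushes: the user alias is
    flushed and invalidated first if the colors differ, and afterwards the
    kernel-side lines (plus the I-cache for executable mappings) are handled.
    A hedged summary in runnable form; the printf lines stand in for the real
    flush primitives.

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static void toy_copy_to_user_page(char *dst, const char *src,
                                          size_t len, bool aliased,
                                          bool vm_exec)
        {
                if (aliased)
                        printf("pre : flush+invalidate the user alias\n");

                memcpy(dst, src, len);

                if (aliased) {
                        printf("post: flush+invalidate kernel lines\n");
                        if (vm_exec)
                                printf("post: invalidate I$ via alias\n");
                } else if (vm_exec) {
                        printf("post: flush D$, invalidate I$ for dst\n");
                }
        }

        int main(void)
        {
                char dst[8], src[8] = "abcdefg";

                toy_copy_to_user_page(dst, src, sizeof(src), true, true);
                return 0;
        }
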
305 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
309 unsigned long phys = page_to_phys(page);
313 * Flush user page if aliased.
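
    copy_from_user_page() needs only the pre-flush: if the colors differ,
    dirty user-color lines must reach memory before the kernel reads the page
    through its own mapping. A matching toy sketch, illustrative only:

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static void toy_copy_from_user_page(char *dst, const char *src,
                                            size_t len, bool aliased)
        {
                if (aliased)
                        printf("pre : flush user alias before reading\n");

                memcpy(dst, src, len);
        }

        int main(void)
        {
                char dst[8], src[8] = "abcdefg";

                toy_copy_from_user_page(dst, src, sizeof(src), true);
                printf("read back: %s\n", dst);
                return 0;
        }
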