Lines Matching refs:page
38 #include <asm/page.h>
342 * Hardware needs alignment to 256 only, but we align to whole page size
346 * Alignments up to the page size are the same for physical and virtual
352 struct page *page;
356 page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
358 if (page_ref_inc_return(page) == 2 &&
359 !pagetable_pte_ctor(page_ptdesc(page))) {
360 page_ref_dec(page);
370 struct page *page;
372 page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
374 if (page_ref_dec_return(page) == 1)
375 pagetable_pte_dtor(page_ptdesc(page));
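
The hits at lines 342-375 all belong to the pte-table allocate/free pair: an SRMMU pte table is only 256 bytes, so several tables share one backing page from the nocache pool, and the page's reference count decides when the page-table constructor and destructor run. Below is a minimal sketch of how these lines fit together; the locking the real functions take is left out, and the helpers marked "assumed" are filled in from context rather than quoted.

/*
 * Sketch, not verbatim: the pool's own allocation holds the page's base
 * reference, the first pte table placed in the page takes the count to 2
 * and triggers the constructor, and the destructor runs when the last
 * table is freed and the count drops back to 1.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	pte_t *ptep = pte_alloc_one_kernel(mm);	/* assumed: grabs a 256-byte table from the nocache pool */
	struct page *page;

	if (!ptep)
		return NULL;
	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	if (page_ref_inc_return(page) == 2 &&
	    !pagetable_pte_ctor(page_ptdesc(page))) {
		page_ref_dec(page);		/* constructor failed: give the reference back */
		ptep = NULL;
	}
	return ptep;
}

void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	struct page *page;

	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	if (page_ref_dec_return(page) == 1)	/* last pte table in this page */
		pagetable_pte_dtor(page_ptdesc(page));
	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);	/* assumed: hand the table back to the pool */
}
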
549 /* No need to flush uncacheable page. */
567 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
568 extern void tsunami_flush_page_to_ram(unsigned long page);
569 extern void tsunami_flush_page_for_dma(unsigned long page);
574 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
582 extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
583 extern void swift_flush_page_to_ram(unsigned long page);
584 extern void swift_flush_page_for_dma(unsigned long page);
590 extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
593 void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
597 page &= PAGE_MASK;
604 swift_flush_page(page);
606 "r" (page), "i" (ASI_M_FLUSH_PROBE));
611 /* swift_flush_cache_page(vma, page); */
612 swift_flush_page(page);
615 "r" (page), "i" (ASI_M_FLUSH_PROBE));
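
Lines 593-615 come from the body of swift_flush_tlb_page(). The idiom, reduced to a sketch here (the real function also checks the mm context and saves/restores interrupts), is: mask the address down to its page, push that page out of the Swift cache, then store into the ASI_M_FLUSH_PROBE alternate space so the MMU drops the TLB entry for it. The store instruction below is the usual SRMMU flush-probe sequence, reconstructed rather than quoted.

/* Sketch reduced from the matches at lines 593-615. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	page &= PAGE_MASK;		/* flushes operate on page-aligned addresses */
	swift_flush_page(page);		/* cache flush first, while the mapping still exists */
	/* A store to the page address in the flush/probe ASI invalidates its TLB entry. */
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (page), "i" (ASI_M_FLUSH_PROBE));
}
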
625 * chips seems to be much more touchy about DVMA and page tables
634 extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
635 extern void viking_flush_page_to_ram(unsigned long page);
636 extern void viking_flush_page_for_dma(unsigned long page);
638 extern void viking_flush_page(unsigned long page);
639 extern void viking_mxcc_flush_page(unsigned long page);
645 unsigned long page);
651 unsigned long page);
657 extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
658 extern void hypersparc_flush_page_to_ram(unsigned long page);
659 extern void hypersparc_flush_page_for_dma(unsigned long page);
664 extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
769 * looking at the prom's page table directly which is what most
847 /* Create a third-level SRMMU 16MB page mapping. */
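
The comment at line 847 sits above the code that builds the kernel's large mappings. As a rough illustration only (the helper name and flag choice below are reconstructed from the SRMMU PTE format, not copied from the file): a 16MB mapping is a leaf PTE, the entry format normally found at the third level, written directly into the slot that covers that 16MB of virtual address space, with the physical address shifted right by 4 plus the valid, privileged and cacheable bits.

/* Hypothetical sketch of a 16MB SRMMU mapping; not the file's own helper. */
static void __init map_16mb_region(unsigned long vaddr, unsigned long paddr)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);	/* the entry covering this 16MB of vaddr space */
	unsigned long pteval;

	/* SRMMU PTEs keep the physical address >> 4 in the upper bits. */
	pteval = (paddr >> 4) | SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID;
	set_pte((pte_t *)pgdp, __pte(pteval));	/* one entry maps the whole 16MB */
}
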
1191 * any page fault can cause kernel pages to become user
1254 static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1265 static void turbosparc_flush_page_to_ram(unsigned long page)
1270 if (srmmu_probe(page))
1271 turbosparc_flush_page_cache(page);
1280 static void turbosparc_flush_page_for_dma(unsigned long page)
1304 static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
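
Lines 1265-1271 show the guard inside turbosparc_flush_page_to_ram(): the flush works on virtual addresses, so the code first probes the SRMMU and only pushes the page back to memory when a translation actually exists (srmmu_probe() returns 0 for an unmapped address). A trimmed sketch, with everything around these two lines in the real function omitted:

/* Sketch reduced from the matches at lines 1265-1271. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
	if (srmmu_probe(page))			/* is the page mapped at all? */
		turbosparc_flush_page_cache(page);	/* write it back by virtual address */
}
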
1638 static void smp_flush_page_for_dma(unsigned long page)
1640 xc1(local_ops->page_for_dma, page);
1641 local_ops->page_for_dma(page);
1718 static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1727 xc2(local_ops->cache_page, (unsigned long)vma, page);
1728 local_ops->cache_page(vma, page);
1732 static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1741 xc2(local_ops->tlb_page, (unsigned long)vma, page);
1742 local_ops->tlb_page(vma, page);
1746 static void smp_flush_page_to_ram(unsigned long page)
1755 xc1(local_ops->page_to_ram, page);
1757 local_ops->page_to_ram(page);
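
The smp_flush_*() wrappers at lines 1638-1757 all follow one pattern: broadcast the flush to the other CPUs with an xc1()/xc2() cross-call that carries the local handler and its arguments, then invoke the same local_ops hook on the calling CPU. A sketch of the two-argument case is below; the context check and the cpumask pruning ahead of the cross-call are reconstructed from context and should be read as assumptions.

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {	/* assumed guard: mm has a hardware context */
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))	/* other CPUs share this mm */
			xc2(local_ops->tlb_page, (unsigned long)vma, page);
		local_ops->tlb_page(vma, page);	/* and do the flush locally */
	}
}
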