Lines Matching defs:page
70 * instantiated within the map. The from and to elements are huge page
120 struct page **, struct vm_area_struct **,
124 unsigned long, unsigned long, struct page *);
128 struct page *ref_page);
131 struct page *ref_page);
142 struct page **pagep);
148 int isolate_hugetlb(struct page *page, struct list_head *list);
149 void putback_active_hugepage(struct page *page);
150 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
151 void free_huge_page(struct page *page);
158 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
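
Taken together, isolate_hugetlb(), putback_active_hugepage() and move_hugetlb_state() are the hugetlb half of the page-migration protocol. A minimal, hypothetical sketch of how a migration path might drive them (the real callers live in mm/migrate.c and mm/memory_hotplug.c; locking, the actual migrate_pages() call and error handling are elided, and the 0-on-success convention is assumed from isolate_hugetlb()'s int return type in this version):

    LIST_HEAD(pagelist);

    /* Take the huge page off hugetlb's active list; 0 means success. */
    if (isolate_hugetlb(page, &pagelist))
            return -EBUSY;

    /*
     * ... hand @pagelist to the migration core; once a new page has been
     * allocated and the contents copied, the core calls
     * move_hugetlb_state(oldpage, newpage, reason) to transfer
     * hugetlb-specific state to the new page ...
     */

    /* If migration failed or never ran, restore the page to the active list. */
    putback_active_hugepage(page);
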
173 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
175 struct page *follow_huge_pd(struct vm_area_struct *vma,
178 struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
180 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
182 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
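
The follow_huge_*() helpers back follow_page()/GUP for hugetlb mappings, one per page-table level at which a huge entry may sit (hugepd, PMD/contiguous PTE, PUD, PGD). A rough sketch of the dispatch a page-table walker performs, with argument lists abbreviated from the declarations above and pgd, pud, vma and flags assumed as walker state (the real, far more careful logic is follow_page_mask() in mm/gup.c):

    pgd = pgd_offset(mm, address);
    if (pgd_huge(*pgd))             /* huge entry at PGD level */
            return follow_huge_pgd(mm, address, pgd, flags);
    /* ... descend to the PUD level ... */
    if (pud_huge(*pud))             /* huge entry at PUD level */
            return follow_huge_pud(mm, address, pud, flags);
    /* ... PMD-mapped and contiguous-PTE huge pages ... */
    return follow_huge_pmd_pte(vma, address, flags);
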
204 struct page *hpage)
223 struct vm_area_struct *vma, struct page **pages,
232 static inline struct page *follow_huge_addr(struct mm_struct *mm,
258 static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
265 static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
271 static inline struct page *follow_huge_pud(struct mm_struct *mm,
277 static inline struct page *follow_huge_pgd(struct mm_struct *mm,
317 struct page **pagep)
329 static inline int isolate_hugetlb(struct page *page, struct list_head *list)
334 static inline void putback_active_hugepage(struct page *page)
338 static inline void move_hugetlb_state(struct page *oldpage,
339 struct page *newpage, int reason)
352 unsigned long end, struct page *ref_page)
359 unsigned long end, struct page *ref_page)
374 * hugepages at page global directory. If the arch supports
479 /* Defines one hugetlb page size */
509 struct page *alloc_huge_page(struct vm_area_struct *vma,
511 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
513 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
515 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
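
alloc_huge_page() allocates against the VMA's hugetlb reservation, and huge_add_to_page_cache() publishes the new page in the backing file's page cache for shared mappings. A condensed sketch of the fault-path pattern (hugetlb_no_page() in mm/hugetlb.c is the canonical caller; h, haddr, mapping and idx are assumed context, and locking and error paths are omitted):

    struct page *page;

    /* Charge the allocation to this VMA's reservation. */
    page = alloc_huge_page(vma, haddr, 0);
    if (IS_ERR(page))
            return PTR_ERR(page);

    clear_huge_page(page, haddr, pages_per_huge_page(h));
    __SetPageUptodate(page);

    /* Shared mapping: insert the page into the file's page cache. */
    if (vma->vm_flags & VM_MAYSHARE)
            ret = huge_add_to_page_cache(page, mapping, idx);
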
607 static inline void arch_clear_hugepage_flags(struct page *page) { }
613 struct page *page, int writable)
619 static inline struct hstate *page_hstate(struct page *page)
621 VM_BUG_ON_PAGE(!PageHuge(page), page);
622 return size_to_hstate(page_size(page));
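
page_hstate() recovers a huge page's size class from its compound size, so callers can query per-size parameters; illustrative use, assuming the standard huge_page_size() accessor:

    struct hstate *h = page_hstate(page);   /* page must be PageHuge() */
    unsigned long sz = huge_page_size(h);   /* e.g. 2 MiB or 1 GiB */
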
635 extern int dissolve_free_huge_page(struct page *page);
665 * It determines whether a huge page should be placed in the
666 * movable zone. Movability of a huge page is
667 * required only if its size is supported for migration.
668 * There won't be any reason for the huge page to be movable if
670 * page should be large enough to be placed under a movable zone
674 * So even though large huge page sizes like the gigantic ones
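
Elsewhere in this header (outside the defs:page matches) that policy collapses into a GFP-mask choice; roughly, assuming the usual hugepage_movable_supported() predicate:

    static inline gfp_t htlb_alloc_mask(struct hstate *h)
    {
            /* Only migratable huge page sizes may land in ZONE_MOVABLE. */
            if (hugepage_movable_supported(h))
                    return GFP_HIGHUSER_MOVABLE;
            return GFP_HIGHUSER;
    }
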
771 void set_page_huge_active(struct page *page);
776 static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
783 static inline struct page *
790 static inline struct page *alloc_huge_page_vma(struct hstate *h,
817 static inline struct hstate *page_hstate(struct page *page)
872 static inline int dissolve_free_huge_page(struct page *page)
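
The !CONFIG_HUGETLB_PAGE stubs above (returning NULL/0 or doing nothing) exist so that callers need no #ifdef guards; a typical, hypothetical caller:

    /* Builds either way: with hugetlb disabled, PageHuge() is
     * constant-false and the branch folds away at compile time. */
    if (PageHuge(page))
            ret = isolate_hugetlb(page, &pagelist);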