Lines matching defs:order in mm/hugetlb.c

56  * Minimum page order among possible hugepage sizes, set to a proper value
293 * reference. In order to ensure that one file_region must hold
1248 unsigned int order)
1251 int nr_pages = 1 << order;
1267 static void free_gigantic_page(struct page *page, unsigned int order)
1274 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1278 free_contig_range(page_to_pfn(page), 1 << order);
1332 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1334 unsigned int order) { }
1573 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1576 int nr_pages = 1 << order;
1580 set_compound_order(page, order);
1635 * stable. Due to locking order, we can only trylock_write. If we can
1669 int order = huge_page_order(h);
1687 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
2500 * Note: This only applies to gigantic (order > MAX_ORDER) pages.
2519 * in order to fix confusing memory reports from free(1) and
3262 void __init hugetlb_add_hstate(unsigned int order)
3267 if (size_to_hstate(PAGE_SIZE << order)) {
3271 BUG_ON(order == 0);
3273 h->order = order;
3274 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
3330 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3461 * In order to avoid races with __do_proc_doulongvec_minmax(), we
3639 * undesirable. However, in order to preserve some of the semantics,
4910 * caller from accessing to them.) In order to do this, we use
5127 return pages << h->order;
5748 void __init hugetlb_cma_reserve(int order)
5758 if (hugetlb_cma_size < (PAGE_SIZE << order)) {
5760 (PAGE_SIZE << order) / SZ_1M);
5778 size = round_up(size, PAGE_SIZE << order);
5781 res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
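
The matches above keep circling the same order arithmetic: a hugepage of a given order is built from 1 << order base pages, spans PAGE_SIZE << order bytes, and its hstate mask clears the low (order + PAGE_SHIFT) bits of an address. The following is a minimal userspace sketch of that arithmetic, not kernel code; PAGE_SHIFT is hard-coded to 12 (4 KiB base pages) purely for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int order = 9;		/* e.g. a 2 MiB hugepage on x86-64 */

	/* One hugepage is backed by 1 << order base pages
	 * (cf. nr_pages = 1 << order in the matches above). */
	unsigned long nr_pages = 1UL << order;

	/* PAGE_SIZE << order is the hugepage size in bytes
	 * (cf. size_to_hstate(PAGE_SIZE << order)). */
	unsigned long bytes = PAGE_SIZE << order;

	/* The hstate mask strips the offset within a hugepage
	 * (cf. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1)). */
	unsigned long long mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);

	printf("order %u -> %lu base pages, %lu KiB, mask 0x%llx\n",
	       order, nr_pages, bytes >> 10, mask);
	return 0;
}

For order = 9 this prints 512 base pages, 2048 KiB, and a mask of 0xffffffffffe00000, the same relationship that hugetlb_add_hstate() and the hugetlb_cma_reserve() size rounding rely on in the lines above.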