Lines Matching defs:order

44  * Lock order:
79 * Interrupts are disabled during allocation and deallocation in order to
188 * disabled when slub_debug=O is used and a cache's min order increases with
317 static inline unsigned int order_objects(unsigned int order, unsigned int size)
319 return ((unsigned int)PAGE_SIZE << order) / size;
322 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
326 (order << OO_SHIFT) + order_objects(order, size)
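
The two helpers matched above pack a slab's page order and its per-slab object count into the single word held by struct kmem_cache_order_objects. The following userspace sketch reproduces that arithmetic; the OO_SHIFT of 16 and the 4 KiB PAGE_SIZE are assumptions taken from mainline slub.c rather than anything visible in these matches.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define OO_SHIFT  16
#define OO_MASK   ((1u << OO_SHIFT) - 1)

struct kmem_cache_order_objects {
	unsigned int x;
};

/* How many objects of 'size' bytes fit into a slab of 2^order pages. */
static unsigned int order_objects(unsigned int order, unsigned int size)
{
	return (PAGE_SIZE << order) / size;
}

/* Pack the order into the high bits and the object count into the low bits. */
static struct kmem_cache_order_objects oo_make(unsigned int order,
					       unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};
	return x;
}

int main(void)
{
	struct kmem_cache_order_objects oo = oo_make(3, 256);

	/* An order-3 slab (8 pages, 32 KiB) of 256-byte objects holds 128. */
	printf("order=%u objects=%u\n", oo.x >> OO_SHIFT, oo.x & OO_MASK);
	return 0;
}
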
1086 * May be called early in order to allocate a slab for the
1314 * order would increase as a result.
1524 * So in order to make the debug calls that expect irqs to be
1620 unsigned int order = oo_order(oo);
1623 page = alloc_pages(flags, order);
1625 page = __alloc_pages_node(node, flags, order);
1628 account_slab_page(page, order, s);
1759 * Let the initial higher-order allocation fail under memory pressure
1760 * so we fall-back to the minimum order allocation.
1772 * Try a lower order alloc if possible
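
The allocate_slab() matches above describe a two-step strategy: attempt the cache's preferred (higher) order in a way that is allowed to fail under memory pressure, then fall back to the minimum order. A minimal userspace sketch of just that control flow, with try_alloc() as a hypothetical stand-in for the page allocator:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ul

static void *try_alloc(unsigned int order)
{
	return malloc(PAGE_SIZE << order);	/* stand-in for alloc_pages() */
}

static void *alloc_slab_sketch(unsigned int oo_order, unsigned int min_order)
{
	/* Let the initial higher-order attempt fail under memory pressure... */
	void *slab = try_alloc(oo_order);

	/* ...and try a lower (minimum) order allocation if it did. */
	if (!slab && min_order < oo_order)
		slab = try_alloc(min_order);
	return slab;
}

int main(void)
{
	void *slab = alloc_slab_sketch(3, 0);

	printf("slab at %p\n", slab);
	free(slab);
	return 0;
}
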
1833 int order = compound_order(page);
1834 int pages = 1 << order;
1851 unaccount_slab_page(page, order, s);
1852 __free_pages(page, order);
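
For scale: compound_order() of an order-3 slab page returns 3, so pages = 1 << 3 = 8 base pages, i.e. 32 KiB assuming the usual 4 KiB page size, all of which is uncharged and handed back to the page allocator by the single __free_pages(page, order) call.
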
2548 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2553 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
3363 * Notice that the allocation order determines the sizes of the per cpu
3365 * Increasing the allocation order reduces the number of times that slabs
3371 * Minimum / Maximum order of slab pages. This influences locking overhead
3372 * and slab fragmentation. A higher order reduces the number of partial slabs
3381 * Calculate the order of allocation given a slab object size.
3383 * The order of allocation has significant impact on performance and other
3384 * system components. Generally order 0 allocations should be preferred since
3385 * order 0 does not cause fragmentation in the page allocator. Larger objects
3386 * can be problematic to put into order 0 slabs because there may be too much
3387 * unused space left. We go to a higher order if more than 1/16th of the slab
3390 * In order to reach satisfactory performance we must ensure that a minimum
3395 * slub_max_order specifies the order where we begin to stop considering the
3397 * we try to keep the page order as low as possible. So we accept more waste
3398 * of space in favor of a small page order.
3400 * Higher order allocations also allow the placement of more objects in a
3402 * requested a higher minimum order then we start with that one instead of
3403 * the smallest order which will fit the object.
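
A worked example of the 1/16th rule, assuming 4 KiB pages: for 96-byte objects an order-0 slab holds 4096 / 96 = 42 objects with 64 bytes left over, and 64 <= 4096 / 16 = 256, so order 0 already satisfies the waste limit and no higher order is tried.
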
3410 unsigned int order;
3415 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3416 order <= max_order; order++) {
3418 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3427 return order;
3432 unsigned int order;
3455 order = slab_order(size, min_objects,
3457 if (order <= slub_max_order)
3458 return order;
3468 order = slab_order(size, 1, slub_max_order, 1);
3469 if (order <= slub_max_order)
3470 return order;
3475 order = slab_order(size, 1, MAX_ORDER, 1);
3476 if (order < MAX_ORDER)
3477 return order;
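
Taken together, the slab_order() and calculate_order() matches above describe a search that prefers small orders and relaxes its requirements in stages: first demand several objects per slab with at most 1/16th waste, then loosen the waste fraction, then accept a single object per slab within slub_max_order, and finally anything below MAX_ORDER. The userspace sketch below reconstructs that flow under stated assumptions (4 KiB pages, a slub_max_order of 3, a starting minimum of 4 objects, MAX_ORDER of 10); it illustrates the heuristic and is not the kernel implementation.

#include <stdio.h>

#define PAGE_SIZE      4096u
#define SLUB_MAX_ORDER 3u
#define MAX_ORDER      10u

/* Smallest order, starting from one that can hold min_objects, whose
 * leftover space is no more than slab_size / fraction. */
static unsigned int slab_order_sketch(unsigned int size,
				      unsigned int min_objects,
				      unsigned int max_order,
				      unsigned int fraction)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < min_objects * size)
		order++;

	for (; order <= max_order; order++) {
		unsigned int slab_size = PAGE_SIZE << order;
		unsigned int rem = slab_size % size;

		if (rem <= slab_size / fraction)
			break;
	}
	return order;
}

/* Mirror the staged fallback: several objects with little waste, then a
 * single object within SLUB_MAX_ORDER, then anything below MAX_ORDER. */
static int calculate_order_sketch(unsigned int size)
{
	unsigned int min_objects = 4;	/* assumed; derived from nr_cpus in SLUB */
	unsigned int order, fraction;

	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order_sketch(size, min_objects,
						  SLUB_MAX_ORDER, fraction);
			if (order <= SLUB_MAX_ORDER)
				return order;
			fraction /= 2;
		}
		min_objects--;
	}

	order = slab_order_sketch(size, 1, SLUB_MAX_ORDER, 1);
	if (order <= SLUB_MAX_ORDER)
		return order;

	order = slab_order_sketch(size, 1, MAX_ORDER, 1);
	if (order < MAX_ORDER)
		return order;
	return -1;
}

int main(void)
{
	printf("size 256  -> order %d\n", calculate_order_sketch(256));
	printf("size 700  -> order %d\n", calculate_order_sketch(700));
	printf("size 5000 -> order %d\n", calculate_order_sketch(5000));
	return 0;
}
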
3537 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3648 * calculate_sizes() determines the order and the distribution of data within
3655 unsigned int order;
3749 * offset 0. In order to align the objects we have to simply size
3756 order = forced_order;
3758 order = calculate_order(size);
3760 if ((int)order < 0)
3764 if (order)
3779 s->oo = oo_make(order, size);
3799 * order increased.
3985 unsigned int order = get_order(size);
3988 page = alloc_pages_node(node, flags, order);
3992 PAGE_SIZE << order);
4119 unsigned int order = compound_order(page);
4124 -(PAGE_SIZE << order));
4125 __free_pages(page, order);
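
The kmalloc_large path matched above sizes the allocation with get_order(), i.e. the smallest order whose span of pages covers the requested size, and later frees it using compound_order() of the page. A userspace sketch of that rounding, with the 4 KiB PAGE_SIZE as an assumption:

#include <stdio.h>

#define PAGE_SIZE 4096ul

/* Smallest 'order' such that PAGE_SIZE << order is at least 'size'. */
static unsigned int get_order_sketch(unsigned long size)
{
	unsigned int order = 0;
	unsigned long span = PAGE_SIZE;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* 10000 bytes round up to 4 pages, i.e. order 2 (16 KiB). */
	printf("get_order(10000) ~ %u\n", get_order_sketch(10000));
	return 0;
}
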
4269 * allocate a kmem_cache_node structure in order to bring the node
4625 int order;
4627 order = get_order(sizeof(struct location) * max);
4629 l = (void *)__get_free_pages(flags, order);
4958 * already held which will conflict with an existing lock order:
5056 SLAB_ATTR_RO(order);