Lines matching defs:page in mm/slub.c (the SLUB allocator)

47  *   3. slab_lock(page) (Only on some arches and for debugging)
56 * A. page->freelist -> List of object free in a page
57 * B. page->inuse -> Number of objects in use
58 * C. page->objects -> Number of objects in page
59 * D. page->frozen -> frozen state
63 * slab is the one who can perform list operations on the page. Other
66 * page's freelist.
94 * minimal so we rely on the page allocators per cpu caches for
97 * page->frozen The slab is frozen and exempt from list processing.
195 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
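
The fields listed as A-D above (freelist, inuse, objects, frozen) live in the slab overlay of struct page; inuse, objects and frozen are bitfields that the code also reads as the single word page->counters, and the 15-bit objects field is why MAX_OBJS_PER_PAGE is 32767 (2^15 - 1). A minimal userspace sketch of that packing, assuming the field widths of this kernel era's struct page (the slab_meta name and the main() driver are illustrative):

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-in for the SLUB part of struct page. */
    struct slab_meta {
        void *freelist;                 /* first free object, or NULL */
        union {
            unsigned long counters;     /* all three fields as one word */
            struct {
                unsigned inuse:16;      /* objects handed out */
                unsigned objects:15;    /* total objects in the slab */
                unsigned frozen:1;      /* owned by a cpu slab */
            };
        };
    };

    int main(void)
    {
        struct slab_meta m = { .freelist = NULL };

        m.objects = 32767;   /* 2^15 - 1 == MAX_OBJS_PER_PAGE */
        m.inuse = m.objects; /* fully allocated */
        m.frozen = 1;

        printf("counters word: %#lx\n", m.counters);
        assert(m.objects == 32767);
        return 0;
    }
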
345 static __always_inline void slab_lock(struct page *page)
347 VM_BUG_ON_PAGE(PageTail(page), page);
348 bit_spin_lock(PG_locked, &page->flags);
351 static __always_inline void slab_unlock(struct page *page)
353 VM_BUG_ON_PAGE(PageTail(page), page);
354 __bit_spin_unlock(PG_locked, &page->flags);
358 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
367 if (cmpxchg_double(&page->freelist, &page->counters,
374 slab_lock(page);
375 if (page->freelist == freelist_old &&
376 page->counters == counters_old) {
377 page->freelist = freelist_new;
378 page->counters = counters_new;
379 slab_unlock(page);
382 slab_unlock(page);
395 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
403 if (cmpxchg_double(&page->freelist, &page->counters,
413 slab_lock(page);
414 if (page->freelist == freelist_old &&
415 page->counters == counters_old) {
416 page->freelist = freelist_new;
417 page->counters = counters_new;
418 slab_unlock(page);
422 slab_unlock(page);
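
Both __cmpxchg_double_slab() (358-382) and cmpxchg_double_slab() (395-422) implement the same contract: freelist and counters are replaced together only if both still hold the expected old values, either with a true double-word cmpxchg or, as in the fallback shown above, under slab_lock(). A userspace sketch of that contract, assuming a pthread mutex in place of slab_lock() (names and the driver are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct slab_meta {
        void *freelist;
        unsigned long counters;
        pthread_mutex_t lock;           /* stands in for slab_lock() */
    };

    /*
     * Replace (freelist, counters) with (fl_new, c_new) only if both words
     * still match (fl_old, c_old); returns true on success.
     */
    static bool cmpxchg_double_slab_sketch(struct slab_meta *m,
                                           void *fl_old, unsigned long c_old,
                                           void *fl_new, unsigned long c_new)
    {
        bool ok = false;

        pthread_mutex_lock(&m->lock);
        if (m->freelist == fl_old && m->counters == c_old) {
            m->freelist = fl_new;
            m->counters = c_new;
            ok = true;
        }
        pthread_mutex_unlock(&m->lock);
        return ok;
    }

    int main(void)
    {
        struct slab_meta m = { .freelist = NULL, .counters = 3,
                               .lock = PTHREAD_MUTEX_INITIALIZER };
        int obj;

        /* Succeeds: both words match the expected old values. */
        printf("%d\n", cmpxchg_double_slab_sketch(&m, NULL, 3, &obj, 2));
        /* Fails: counters changed underneath the caller. */
        printf("%d\n", cmpxchg_double_slab_sketch(&m, &obj, 3, NULL, 1));
        return 0;
    }
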
441 * Determine a map of object in use on a page.
443 * Node listlock must be held to guarantee that the page does
446 static unsigned long *get_map(struct kmem_cache *s, struct page *page)
450 void *addr = page_address(page);
456 bitmap_zero(object_map, page->objects);
458 for (p = page->freelist; p; p = get_freepointer(s, p))
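
get_map() (446-458) computes which objects are free by clearing a bitmap sized for page->objects and walking the freelist, where every free object stores a pointer to the next free object (read via get_freepointer()). A standalone sketch of that walk over a fake slab, assuming the free pointer sits at offset 0 of each object (the layout and helper names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define OBJ_SIZE   64
    #define NR_OBJECTS 8

    static char slab[OBJ_SIZE * NR_OBJECTS];     /* fake slab page */
    static unsigned char object_map[NR_OBJECTS]; /* 1 = object is free */

    /* Free pointer stored at offset 0 of a free object (illustrative). */
    static void *get_freepointer(void *object)
    {
        void *next;

        memcpy(&next, object, sizeof(next));
        return next;
    }

    static size_t obj_index(void *object)
    {
        return ((char *)object - slab) / OBJ_SIZE;
    }

    int main(void)
    {
        void *freelist = NULL, *p;
        size_t free_idx[] = { 1, 3, 5 };
        size_t i;

        /* Build a freelist out of objects 1, 3 and 5. */
        for (i = 0; i < 3; i++) {
            void *obj = slab + free_idx[i] * OBJ_SIZE;
            memcpy(obj, &freelist, sizeof(freelist));
            freelist = obj;
        }

        /* get_map(): mark every object reachable from the freelist. */
        memset(object_map, 0, sizeof(object_map));
        for (p = freelist; p; p = get_freepointer(p))
            object_map[obj_index(p)] = 1;

        for (i = 0; i < NR_OBJECTS; i++)
            printf("object %zu: %s\n", i, object_map[i] ? "free" : "in use");
        return 0;
    }
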
518 /* Verify that a pointer has an address that is valid within a slab page */
520 struct page *page, void *object)
527 base = page_address(page);
530 if (object < base || object >= base + page->objects * s->size ||
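
check_valid_pointer() (518-530) accepts an object pointer only if it lies in [base, base + objects * size) and is an exact multiple of the object size from the base; the listing shows the range half of that test. A compact sketch of both halves, with made-up slab geometry:

    #include <stdbool.h>
    #include <stdio.h>

    /* true iff object points at the start of a slot inside the slab */
    static bool check_valid_pointer(const char *base, unsigned int objects,
                                    unsigned int size, const void *object)
    {
        const char *p = object;

        if (p < base || p >= base + (size_t)objects * size)
            return false;                /* outside the slab */
        return (p - base) % size == 0;   /* must sit on a slot boundary */
    }

    int main(void)
    {
        static char slab[64 * 8];        /* 8 objects of 64 bytes */

        printf("%d\n", check_valid_pointer(slab, 8, 64, slab + 128)); /* 1 */
        printf("%d\n", check_valid_pointer(slab, 8, 64, slab + 100)); /* 0 */
        printf("%d\n", check_valid_pointer(slab, 8, 64, slab + 512)); /* 0 */
        return 0;
    }
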
640 static void print_page_info(struct page *page)
643 page, page->objects, page->inuse, page->freelist, page->flags);
675 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
679 !check_valid_pointer(s, page, nextfree) && freelist) {
680 object_err(s, page, *freelist, "Freechain corrupt");
689 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
692 u8 *addr = page_address(page);
696 print_page_info(page);
728 void object_err(struct kmem_cache *s, struct page *page,
732 print_trailer(s, page, object);
735 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
745 print_page_info(page);
772 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
778 u8 *addr = page_address(page);
794 print_trailer(s, page, object);
838 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
851 return check_bytes_and_report(s, page, p, "Object padding",
855 /* Check the pad bytes at the end of a slab page */
856 static int slab_pad_check(struct kmem_cache *s, struct page *page)
868 start = page_address(page);
869 length = page_size(page);
884 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
892 static int check_object(struct kmem_cache *s, struct page *page,
899 if (!check_bytes_and_report(s, page, object, "Left Redzone",
903 if (!check_bytes_and_report(s, page, object, "Right Redzone",
908 check_bytes_and_report(s, page, p, "Alignment padding",
916 (!check_bytes_and_report(s, page, p, "Poison", p,
918 !check_bytes_and_report(s, page, p, "End Poison",
924 check_pad_bytes(s, page, p);
935 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
936 object_err(s, page, p, "Freepointer corrupt");
948 static int check_slab(struct kmem_cache *s, struct page *page)
954 if (!PageSlab(page)) {
955 slab_err(s, page, "Not a valid slab page");
959 maxobj = order_objects(compound_order(page), s->size);
960 if (page->objects > maxobj) {
961 slab_err(s, page, "objects %u > max %u",
962 page->objects, maxobj);
965 if (page->inuse > page->objects) {
966 slab_err(s, page, "inuse %u > max %u",
967 page->inuse, page->objects);
971 slab_pad_check(s, page);
976 * Determine if a certain object on a page is on the freelist. Must hold the
979 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
986 fp = page->freelist;
987 while (fp && nr <= page->objects) {
990 if (!check_valid_pointer(s, page, fp)) {
992 object_err(s, page, object,
996 slab_err(s, page, "Freepointer corrupt");
997 page->freelist = NULL;
998 page->inuse = page->objects;
1009 max_objects = order_objects(compound_order(page), s->size);
1013 if (page->objects != max_objects) {
1014 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1015 page->objects, max_objects);
1016 page->objects = max_objects;
1019 if (page->inuse != page->objects - nr) {
1020 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1021 page->inuse, page->objects - nr);
1022 page->inuse = page->objects - nr;
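
on_freelist() (979-1022) answers whether an object sits on the page freelist while doubling as a consistency check: the walk is bounded by page->objects so a corrupted, cyclic freelist cannot loop forever, and a mismatch between the counted free objects and page->inuse is repaired (1019-1022). A sketch of the bounded count-and-search on a plain linked list (the node type and driver are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct object {
        struct object *next;             /* free pointer */
    };

    /*
     * Walk at most 'objects' entries of the freelist.  Returns true if
     * 'search' was found; also reports how many free objects were counted
     * so the caller can cross-check (and repair) the inuse counter.
     */
    static bool on_freelist(struct object *freelist, unsigned int objects,
                            struct object *search, unsigned int *nr_free)
    {
        struct object *fp = freelist;
        unsigned int nr = 0;
        bool found = false;

        while (fp && nr <= objects) {    /* bound guards against cycles */
            if (fp == search)
                found = true;
            fp = fp->next;
            nr++;
        }
        *nr_free = nr;
        return found;
    }

    int main(void)
    {
        struct object o[4] = { { &o[2] }, { NULL }, { &o[1] }, { NULL } };
        unsigned int objects = 4, inuse = 3, nr_free;

        /* freelist: o[0] -> o[2] -> o[1] -> NULL, so only o[3] is in use;
         * inuse is deliberately wrong to show the repair path. */
        printf("o[3] free? %d\n", on_freelist(&o[0], objects, &o[3], &nr_free));
        if (inuse != objects - nr_free) {
            printf("inuse %u but counted %u free, fixing\n", inuse, nr_free);
            inuse = objects - nr_free;
        }
        return 0;
    }
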
1028 static void trace(struct kmem_cache *s, struct page *page, void *object,
1035 object, page->inuse,
1036 page->freelist);
1050 struct kmem_cache_node *n, struct page *page)
1056 list_add(&page->slab_list, &n->full);
1059 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1065 list_del(&page->slab_list);
1105 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1116 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1122 memset(addr, POISON_INUSE, page_size(page));
1127 struct page *page, void *object)
1129 if (!check_slab(s, page))
1132 if (!check_valid_pointer(s, page, object)) {
1133 object_err(s, page, object, "Freelist Pointer check fails");
1137 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1144 struct page *page,
1148 if (!alloc_consistency_checks(s, page, object))
1155 trace(s, page, object, 1);
1160 if (PageSlab(page)) {
1162 * If this is a slab page then lets do the best we can
1167 page->inuse = page->objects;
1168 page->freelist = NULL;
1174 struct page *page, void *object, unsigned long addr)
1176 if (!check_valid_pointer(s, page, object)) {
1177 slab_err(s, page, "Invalid object pointer 0x%p", object);
1181 if (on_freelist(s, page, object)) {
1182 object_err(s, page, object, "Object already free");
1186 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1189 if (unlikely(s != page->slab_cache)) {
1190 if (!PageSlab(page)) {
1191 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1193 } else if (!page->slab_cache) {
1198 object_err(s, page, object,
1199 "page slab pointer corrupt.");
1207 struct kmem_cache *s, struct page *page,
1211 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1218 slab_lock(page);
1221 if (!check_slab(s, page))
1229 if (!free_consistency_checks(s, page, object, addr))
1235 trace(s, page, object, 0);
1248 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1251 slab_unlock(page);
1455 struct page *page, void *object) {}
1457 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
1460 struct page *page, void *object, unsigned long addr) { return 0; }
1463 struct kmem_cache *s, struct page *page,
1467 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1469 static inline int check_object(struct kmem_cache *s, struct page *page,
1472 struct page *page) {}
1474 struct page *page) {}
1493 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
1600 static void *setup_object(struct kmem_cache *s, struct page *page,
1603 setup_object_debug(s, page, object);
1616 static inline struct page *alloc_slab_page(struct kmem_cache *s,
1619 struct page *page;
1623 page = alloc_pages(flags, order);
1625 page = __alloc_pages_node(node, flags, order);
1627 if (page)
1628 account_slab_page(page, order, s);
1630 return page;
1675 static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1683 * If the target page allocation failed, the number of objects on the
1684 * page might be smaller than the usual size defined by the cache.
1697 static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1704 if (page->objects < 2 || !s->random_seq)
1710 page_limit = page->objects * s->size;
1711 start = fixup_red_left(s, page_address(page));
1714 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1716 cur = setup_object(s, page, cur);
1717 page->freelist = cur;
1719 for (idx = 1; idx < page->objects; idx++) {
1720 next = next_freelist_entry(s, page, &pos, start, page_limit,
1722 next = setup_object(s, page, next);
1736 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1742 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1744 struct page *page;
1766 page = alloc_slab_page(s, alloc_gfp, node, oo);
1767 if (unlikely(!page)) {
1774 page = alloc_slab_page(s, alloc_gfp, node, oo);
1775 if (unlikely(!page))
1780 page->objects = oo_objects(oo);
1782 page->slab_cache = s;
1783 __SetPageSlab(page);
1784 if (page_is_pfmemalloc(page))
1785 SetPageSlabPfmemalloc(page);
1787 kasan_poison_slab(page);
1789 start = page_address(page);
1791 setup_page_debug(s, page, start);
1793 shuffle = shuffle_freelist(s, page);
1797 start = setup_object(s, page, start);
1798 page->freelist = start;
1799 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1801 next = setup_object(s, page, next);
1808 page->inuse = page->objects;
1809 page->frozen = 1;
1814 if (!page)
1817 inc_slabs_node(s, page_to_nid(page), page->objects);
1819 return page;
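
allocate_slab() (1742-1819) formats the fresh page: setup_object() for each slot, then either a shuffled freelist or the straight chain built in the loop at 1797-1801, and finally inuse = objects with frozen = 1 because the slab goes straight to a cpu slab. A sketch of the straight (unshuffled) chaining step over a fake page, again assuming the free pointer is stored at offset 0 (layout and helpers are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define OBJ_SIZE   64
    #define NR_OBJECTS 8

    /* Store the "next free" pointer at offset 0 of an object (illustrative). */
    static void set_freepointer(void *object, void *next)
    {
        memcpy(object, &next, sizeof(next));
    }

    int main(void)
    {
        static char page[OBJ_SIZE * NR_OBJECTS];
        void *freelist, *p, *next;
        int idx;

        /* Chain object 0 -> 1 -> ... -> N-1 -> NULL, head is object 0. */
        freelist = page;
        for (idx = 0, p = page; idx < NR_OBJECTS - 1; idx++) {
            next = (char *)p + OBJ_SIZE;
            set_freepointer(p, next);
            p = next;
        }
        set_freepointer(p, NULL);        /* last object terminates the list */

        /*
         * The kernel then sets page->inuse = page->objects and
         * page->frozen = 1: every object is considered handed out to the
         * cpu slab that owns this freelist.
         */
        printf("freelist head: %p\n", freelist);
        return 0;
    }
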
1822 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1831 static void __free_slab(struct kmem_cache *s, struct page *page)
1833 int order = compound_order(page);
1839 slab_pad_check(s, page);
1840 for_each_object(p, s, page_address(page),
1841 page->objects)
1842 check_object(s, page, p, SLUB_RED_INACTIVE);
1845 __ClearPageSlabPfmemalloc(page);
1846 __ClearPageSlab(page);
1848 page->mapping = NULL;
1851 unaccount_slab_page(page, order, s);
1852 __free_pages(page, order);
1857 struct page *page = container_of(h, struct page, rcu_head);
1859 __free_slab(page->slab_cache, page);
1862 static void free_slab(struct kmem_cache *s, struct page *page)
1865 call_rcu(&page->rcu_head, rcu_free_slab);
1867 __free_slab(s, page);
1870 static void discard_slab(struct kmem_cache *s, struct page *page)
1872 dec_slabs_node(s, page_to_nid(page), page->objects);
1873 free_slab(s, page);
1880 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1884 list_add_tail(&page->slab_list, &n->partial);
1886 list_add(&page->slab_list, &n->partial);
1890 struct page *page, int tail)
1893 __add_partial(n, page, tail);
1897 struct page *page)
1900 list_del(&page->slab_list);
1911 struct kmem_cache_node *n, struct page *page,
1916 struct page new;
1925 freelist = page->freelist;
1926 counters = page->counters;
1930 new.inuse = page->objects;
1939 if (!__cmpxchg_double_slab(s, page,
1945 remove_partial(n, page);
1950 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1951 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1959 struct page *page, *page2;
1974 list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
1977 if (!pfmemalloc_match(page, flags))
1980 t = acquire_slab(s, n, page, object == NULL, &objects);
1986 c->page = page;
1990 put_cpu_partial(s, page, 0);
2003 * Get a page from somewhere. Search in increasing NUMA distances.
2067 * Get a partial page, lock it and return it.
2157 static void deactivate_slab(struct kmem_cache *s, struct page *page,
2161 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2166 struct page new;
2167 struct page old;
2169 if (page->freelist) {
2176 * to the page freelist while it is still frozen. Leave the
2179 * There is no need to take the list->lock because the page
2191 if (freelist_corrupted(s, page, &freelist, nextfree))
2195 prior = page->freelist;
2196 counters = page->counters;
2202 } while (!__cmpxchg_double_slab(s, page,
2211 * Stage two: Ensure that the page is unfrozen while the
2216 * with the count. If there is a mismatch then the page
2217 * is not unfrozen but the page is on the wrong list.
2220 * the page from the list that we just put it on again
2226 old.freelist = page->freelist;
2227 old.counters = page->counters;
2249 * that acquire_slab() will see a slab page that
2271 remove_partial(n, page);
2273 remove_full(s, n, page);
2276 add_partial(n, page, tail);
2278 add_full(s, n, page);
2282 if (!__cmpxchg_double_slab(s, page,
2297 discard_slab(s, page);
2301 c->page = NULL;
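
deactivate_slab() (2157-2301) runs in the two stages the comments above describe: stage one returns the per-cpu freelist's objects to the page freelist while the page is still frozen (so no list_lock is needed), and stage two unfreezes the page with a cmpxchg and moves it to the partial or full list, retrying if it raced. A much-simplified sketch of the stage-one drain on plain lists (the per-object cmpxchg, the kept-back last object and all locking are omitted):

    #include <stdio.h>

    struct object {
        struct object *next;
    };

    int main(void)
    {
        struct object o[5];
        struct object *cpu_freelist, *page_freelist, *nextfree;
        int i;

        /* cpu freelist: o[0..2]; page freelist: o[3..4] (both NULL-ended) */
        for (i = 0; i < 2; i++)
            o[i].next = &o[i + 1];
        o[2].next = NULL;
        o[3].next = &o[4];
        o[4].next = NULL;
        cpu_freelist = &o[0];
        page_freelist = &o[3];

        /* Stage one: push each per-cpu object onto the page freelist. */
        while (cpu_freelist) {
            nextfree = cpu_freelist->next;          /* save successor */
            cpu_freelist->next = page_freelist;     /* link ahead of it */
            page_freelist = cpu_freelist;           /* new head */
            cpu_freelist = nextfree;
        }

        for (nextfree = page_freelist; nextfree; nextfree = nextfree->next)
            printf("free object %ld\n", (long)(nextfree - o));
        return 0;
    }
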
2318 struct page *page, *discard_page = NULL;
2320 while ((page = slub_percpu_partial(c))) {
2321 struct page new;
2322 struct page old;
2324 slub_set_percpu_partial(c, page);
2326 n2 = get_node(s, page_to_nid(page));
2337 old.freelist = page->freelist;
2338 old.counters = page->counters;
2346 } while (!__cmpxchg_double_slab(s, page,
2352 page->next = discard_page;
2353 discard_page = page;
2355 add_partial(n, page, DEACTIVATE_TO_TAIL);
2364 page = discard_page;
2368 discard_slab(s, page);
2375 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2376 * partial page slot if available.
2381 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2384 struct page *oldpage;
2414 pobjects += page->objects - page->inuse;
2416 page->pages = pages;
2417 page->pobjects = pobjects;
2418 page->next = oldpage;
2420 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
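
put_cpu_partial() (2381-2420) chains a just-frozen page onto the cpu's partial list via page->next, keeping the running pages/pobjects totals on the list head and publishing the new head with this_cpu_cmpxchg(). A sketch of that bookkeeping, assuming a single cpu and leaving out the drain threshold and the this_cpu machinery (fake_page and the driver are illustrative):

    #include <stdio.h>

    struct fake_page {
        struct fake_page *next; /* next page on the per-cpu partial list */
        int objects;            /* capacity of this slab */
        int inuse;              /* objects currently allocated */
        int pages;              /* running totals, kept on the list head */
        int pobjects;
    };

    static struct fake_page *cpu_partial;   /* per-cpu head (one cpu here) */

    static void put_cpu_partial(struct fake_page *page)
    {
        struct fake_page *oldpage = cpu_partial;
        int pages = 1, pobjects = page->objects - page->inuse;

        if (oldpage) {                  /* fold in the old head's totals */
            pages += oldpage->pages;
            pobjects += oldpage->pobjects;
        }
        page->pages = pages;
        page->pobjects = pobjects;
        page->next = oldpage;
        cpu_partial = page;             /* the kernel uses this_cpu_cmpxchg */
    }

    int main(void)
    {
        struct fake_page a = { .objects = 32, .inuse = 10 };
        struct fake_page b = { .objects = 32, .inuse = 30 };

        put_cpu_partial(&a);
        put_cpu_partial(&b);
        printf("partial pages=%d, free objects available=%d\n",
               cpu_partial->pages, cpu_partial->pobjects);  /* 2, 24 */
        return 0;
    }
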
2436 deactivate_slab(s, c->page, c->freelist, c);
2448 if (c->page)
2466 return c->page || slub_percpu_partial(c);
2497 static inline int node_match(struct page *page, int node)
2500 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2507 static int count_free(struct page *page)
2509 return page->objects - page->inuse;
2520 int (*get_count)(struct page *))
2524 struct page *page;
2527 list_for_each_entry(page, &n->partial, slab_list)
2528 x += get_count(page);
2576 struct page *page;
2585 page = new_slab(s, flags, node);
2586 if (page) {
2588 if (c->page)
2592 * No other reference to the page yet so we can
2595 freelist = page->freelist;
2596 page->freelist = NULL;
2599 c->page = page;
2606 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2608 if (unlikely(PageSlabPfmemalloc(page)))
2615 * Check the page->freelist of a page and either transfer the freelist to the
2616 * per cpu freelist or deactivate the page.
2618 * The page is still frozen if the return value is not NULL.
2620 * If this function returns NULL then the page has been unfrozen.
2624 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2626 struct page new;
2631 freelist = page->freelist;
2632 counters = page->counters;
2637 new.inuse = page->objects;
2640 } while (!__cmpxchg_double_slab(s, page,
2662 * a call to the page allocator and the setup of a new slab.
2671 struct page *page;
2675 page = c->page;
2676 if (!page) {
2688 if (unlikely(!node_match(page, node))) {
2698 deactivate_slab(s, page, c->freelist, c);
2704 * By rights, we should be searching for a slab page that was
2706 * information when the page leaves the per-cpu allocator
2708 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2709 deactivate_slab(s, page, c->freelist, c);
2718 freelist = get_freelist(s, page);
2721 c->page = NULL;
2732 * page is pointing to the page from which the objects are obtained.
2733 * That page must be frozen for per cpu allocations to work.
2735 VM_BUG_ON(!c->page->frozen);
2743 page = c->page = slub_percpu_partial(c);
2744 slub_set_percpu_partial(c, page);
2756 page = c->page;
2757 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2762 !alloc_debug_processing(s, page, freelist, addr))
2765 deactivate_slab(s, page, get_freepointer(s, freelist), c);
2820 struct page *page;
2847 * on c to guarantee that object and page associated with previous tid
2849 * page could be one associated with next tid and our alloc/free
2862 page = c->page;
2863 if (unlikely(!object || !page || !node_match(page, node))) {
2966 * lock and free the item. If there is no additional partial page
2969 static void __slab_free(struct kmem_cache *s, struct page *page,
2976 struct page new;
2984 !free_debug_processing(s, page, head, tail, cnt, addr))
2992 prior = page->freelist;
2993 counters = page->counters;
3012 n = get_node(s, page_to_nid(page));
3026 } while (!cmpxchg_double_slab(s, page,
3041 * If we just froze the page then put it onto the
3044 put_cpu_partial(s, page, 1);
3059 remove_full(s, n, page);
3060 add_partial(n, page, DEACTIVATE_TO_TAIL);
3071 remove_partial(n, page);
3075 remove_full(s, n, page);
3080 discard_slab(s, page);
3095 * same page) possible by specifying head and tail ptr, plus objects
3099 struct page *page, void *head, void *tail,
3125 if (likely(page == c->page)) {
3140 __slab_free(s, page, head, tail_obj, cnt, addr);
3144 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3153 do_slab_free(s, page, head, tail, cnt, addr);
3174 struct page *page;
3184 * page. It builds a detached freelist directly within the given
3185 * page/objects. This can happen without any need for
3200 struct page *page;
3203 df->page = NULL;
3213 page = virt_to_head_page(object);
3216 if (unlikely(!PageSlab(page))) {
3217 BUG_ON(!PageCompound(page));
3219 __free_pages(page, compound_order(page));
3224 df->s = page->slab_cache;
3230 df->page = page;
3242 /* df->page is always set at this point */
3243 if (df->page == virt_to_head_page(object)) {
3275 if (!df.page)
3278 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
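
kmem_cache_free_bulk() loops over build_detached_freelist() (3200-3243): starting from the end of the pointer array it gathers every object that lives on the same page into one detached freelist and frees the whole batch with the single slab_free() call at 3278. A sketch of the grouping idea, assuming the address shifted by a page shift as the page identity (a stand-in for virt_to_head_page(); the lookahead limit and the actual freelist linking are omitted):

    #include <stdio.h>
    #include <stdint.h>

    #define FAKE_PAGE_SHIFT 12

    /* Which fake "page" an object lives on (stand-in for virt_to_head_page). */
    static uintptr_t page_of(void *p)
    {
        return (uintptr_t)p >> FAKE_PAGE_SHIFT;
    }

    /*
     * Group p[] into one batch per page: take the last unprocessed object,
     * collect every other object on the same page (marking entries NULL),
     * report the batch, and repeat until the array is drained.
     */
    int main(void)
    {
        static _Alignas(4096) char pg[2][1 << FAKE_PAGE_SHIFT];
        void *p[] = { pg[0], pg[1] + 64, pg[0] + 64, pg[1], pg[0] + 128 };
        size_t n = sizeof(p) / sizeof(p[0]);
        size_t remaining = n, i, cnt;

        while (remaining) {
            uintptr_t page = 0;

            cnt = 0;
            for (i = n; i-- > 0; ) {
                if (!p[i])
                    continue;                  /* already processed */
                if (!cnt)
                    page = page_of(p[i]);      /* start a new batch */
                if (page_of(p[i]) != page)
                    continue;                  /* different page */
                printf("batch for page %#lx: obj %p\n",
                       (unsigned long)page, p[i]);
                p[i] = NULL;                   /* mark processed */
                cnt++;
            }
            printf("-> one slab_free() of %zu objects\n", cnt);
            remaining -= cnt;
        }
        return 0;
    }
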
3385 * order 0 does not cause fragmentation in the page allocator. Larger objects
3397 * we try to keep the page order as low as possible. So we accept more waste
3398 * of space in favor of a small page order.
3527 struct page *page;
3532 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3534 BUG_ON(!page);
3535 if (page_to_nid(page) != node) {
3540 n = page->freelist;
3548 page->freelist = get_freepointer(kmem_cache_node, n);
3549 page->inuse = 1;
3550 page->frozen = 0;
3553 inc_slabs_node(kmem_cache_node, node, page->objects);
3559 __add_partial(n, page, DEACTIVATE_TO_HEAD);
3818 * list to avoid pounding the page allocator excessively.
3845 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3849 void *addr = page_address(page);
3853 slab_err(s, page, text, s->name);
3854 slab_lock(page);
3856 map = get_map(s, page);
3857 for_each_object(p, s, addr, page->objects) {
3865 slab_unlock(page);
3877 struct page *page, *h;
3881 list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3882 if (!page->inuse) {
3883 remove_partial(n, page);
3884 list_add(&page->slab_list, &discard);
3886 list_slab_objects(s, page,
3892 list_for_each_entry_safe(page, h, &discard, slab_list)
3893 discard_slab(s, page);
3983 struct page *page;
3988 page = alloc_pages_node(node, flags, order);
3989 if (page) {
3990 ptr = page_address(page);
3991 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4038 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4048 s = page->slab_cache;
4051 if (ptr < page_address(page))
4052 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4056 offset = (ptr - page_address(page)) % s->size;
4091 struct page *page;
4096 page = virt_to_head_page(object);
4098 if (unlikely(!PageSlab(page))) {
4099 WARN_ON(!PageCompound(page));
4100 return page_size(page);
4103 return slab_ksize(page->slab_cache);
4109 struct page *page;
4117 page = virt_to_head_page(x);
4118 if (unlikely(!PageSlab(page))) {
4119 unsigned int order = compound_order(page);
4121 BUG_ON(!PageCompound(page));
4123 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4125 __free_pages(page, order);
4128 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4148 struct page *page;
4149 struct page *t;
4167 * list_lock. page->inuse here is the upper limit.
4169 list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4170 int free = page->objects - page->inuse;
4172 /* Do not reread page->inuse */
4178 if (free == page->objects) {
4179 list_move(&page->slab_list, &discard);
4182 list_move(&page->slab_list, promote + free - 1);
4195 list_for_each_entry_safe(page, t, &discard, slab_list)
4196 discard_slab(s, page);
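
__kmem_cache_shrink() (4148-4196) walks each node's partial list under list_lock, moves completely free slabs to a discard list and sorts the rest into buckets keyed by their free-object count (promote + free - 1), then splices the buckets back so the fullest slabs end up at the head of the partial list and the empties are freed. A sketch of the bucketing with arrays standing in for the list heads (the list plumbing is simplified; SHRINK_PROMOTE_MAX here mirrors the kernel's constant):

    #include <stdio.h>

    #define SHRINK_PROMOTE_MAX 32

    struct fake_slab {
        int objects;
        int inuse;
    };

    int main(void)
    {
        struct fake_slab partial[] = {
            { 32, 0 }, { 32, 31 }, { 32, 16 }, { 32, 0 }, { 32, 30 },
        };
        int n = sizeof(partial) / sizeof(partial[0]);
        /* bucket[f] collects slabs with exactly f+1 free objects */
        struct fake_slab *bucket[SHRINK_PROMOTE_MAX][8];
        int bucket_len[SHRINK_PROMOTE_MAX] = { 0 };
        int i, f;

        for (i = 0; i < n; i++) {
            int free = partial[i].objects - partial[i].inuse;

            if (free == partial[i].objects) {
                printf("discard completely free slab %d\n", i);
            } else if (free > 0 && free <= SHRINK_PROMOTE_MAX) {
                bucket[free - 1][bucket_len[free - 1]++] = &partial[i];
            }
            /* slabs with more free slots than SHRINK_PROMOTE_MAX stay put */
        }

        /* Rebuild: fullest slabs (fewest free objects) come out first. */
        printf("new partial order:");
        for (f = 0; f < SHRINK_PROMOTE_MAX; f++)
            for (i = 0; i < bucket_len[f]; i++)
                printf(" slab%ld(free=%d)",
                       (long)(bucket[f][i] - partial), f + 1);
        printf("\n");
        return 0;
    }
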
4330 * the page allocator. Allocate them properly then fix up the pointers
4349 struct page *p;
4510 static int count_inuse(struct page *page)
4512 return page->inuse;
4515 static int count_total(struct page *page)
4517 return page->objects;
4522 static void validate_slab(struct kmem_cache *s, struct page *page)
4525 void *addr = page_address(page);
4528 slab_lock(page);
4530 if (!check_slab(s, page) || !on_freelist(s, page, NULL))
4534 map = get_map(s, page);
4535 for_each_object(p, s, addr, page->objects) {
4539 if (!check_object(s, page, p, val))
4544 slab_unlock(page);
4551 struct page *page;
4556 list_for_each_entry(page, &n->partial, slab_list) {
4557 validate_slab(s, page);
4567 list_for_each_entry(page, &n->full, slab_list) {
4568 validate_slab(s, page);
4719 struct page *page, enum track_item alloc)
4721 void *addr = page_address(page);
4725 map = get_map(s, page);
4726 for_each_object(p, s, addr, page->objects)
4750 struct page *page;
4756 list_for_each_entry(page, &n->partial, slab_list)
4757 process_slab(&t, s, page, alloc);
4758 list_for_each_entry(page, &n->full, slab_list)
4759 process_slab(&t, s, page, alloc);
4924 struct page *page;
4926 page = READ_ONCE(c->page);
4927 if (!page)
4930 node = page_to_nid(page);
4932 x = page->objects;
4934 x = page->inuse;
4941 page = slub_percpu_partial_read_once(c);
4942 if (page) {
4943 node = page_to_nid(page);
4949 x = page->pages;
5147 struct page *page;
5149 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5151 if (page) {
5152 pages += page->pages;
5153 objects += page->pobjects;
5161 struct page *page;
5163 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5165 if (page && len < PAGE_SIZE - 20)
5167 page->pobjects, page->pages);