Lines Matching refs:freelist

56  *	A. page->freelist	-> List of free objects in a page
64 * processors may put objects onto the freelist but the processor that
66 * page's freelist.
109 * freelist that allows lockless access to
110 * free objects in addition to the regular freelist
244 * Returns freelist pointer (ptr). With hardening, this is obfuscated
269 /* Returns the freelist pointer recorded at location ptr_addr. */
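
The two matches above (244, 269) are from the helpers that encode and decode the freelist pointer when freelist hardening is enabled. A minimal userspace sketch of the idea, assuming the pointer is simply XORed with a per-cache random value and the address it is stored at (harden_ptr, unharden_ptr and struct cache_model are illustrative names, and recent kernels additionally byte-swap the storage address):

#include <stdint.h>
#include <stdio.h>

struct cache_model { uintptr_t random; };        /* per-cache random cookie */

/* Obfuscate a freelist pointer before it is written to ptr_addr. */
static void *harden_ptr(struct cache_model *s, void *ptr, void **ptr_addr)
{
        return (void *)((uintptr_t)ptr ^ s->random ^ (uintptr_t)ptr_addr);
}

/* Recover the freelist pointer recorded at location ptr_addr. */
static void *unharden_ptr(struct cache_model *s, void **ptr_addr)
{
        return (void *)((uintptr_t)*ptr_addr ^ s->random ^ (uintptr_t)ptr_addr);
}

int main(void)
{
        struct cache_model s = { .random = 0x5a5a17c3UL };
        int obj;
        void *slot;                              /* the in-object free pointer slot */

        slot = harden_ptr(&s, &obj, &slot);      /* store the obfuscated value */
        printf("%d\n", unharden_ptr(&s, &slot) == (void *)&obj);   /* prints 1 */
        return 0;
}

Mixing the storage address into the XOR means the same object pointer encodes differently in every slot, which makes the stored freelist links harder to forge.
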
367 if (cmpxchg_double(&page->freelist, &page->counters,
375 if (page->freelist == freelist_old &&
377 page->freelist = freelist_new;
403 if (cmpxchg_double(&page->freelist, &page->counters,
414 if (page->freelist == freelist_old &&
416 page->freelist = freelist_new;
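
Matches 367-416 come from the two helpers that swap a slab page's freelist head and its packed counters as a single unit. A compact model of the fallback path shown above, with the locking, interrupt handling and the real cmpxchg_double left out (struct page_model and slab_cmpxchg_double are illustrative names):

#include <stdbool.h>

struct page_model {
        void *freelist;              /* first free object on the slab          */
        unsigned long counters;      /* packed inuse / objects / frozen fields */
};

/*
 * Succeed only if both words still hold the expected old values, then
 * install the new freelist head and counters together.
 */
static bool slab_cmpxchg_double(struct page_model *page,
                                void *freelist_old, unsigned long counters_old,
                                void *freelist_new, unsigned long counters_new)
{
        if (page->freelist == freelist_old && page->counters == counters_old) {
                page->freelist = freelist_new;
                page->counters = counters_new;
                return true;
        }
        return false;
}
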
458 for (p = page->freelist; p; p = get_freepointer(s, p))
643 page, page->objects, page->inuse, page->freelist, page->flags);
676 void **freelist, void *nextfree)
679 !check_valid_pointer(s, page, nextfree) && freelist) {
680 object_err(s, page, *freelist, "Freechain corrupt");
681 *freelist = NULL;
976 * Determine if a certain object on a page is on the freelist. Must hold the
986 fp = page->freelist;
997 page->freelist = NULL;
1036 page->freelist);
1168 page->freelist = NULL;
1205 /* Supports checking bulk free of a constructed freelist */
1239 /* Reached end of constructed freelist yet? */
1248 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1494 void **freelist, void *nextfree)
1558 /* Head and tail of the reconstructed freelist */
1580 /* Move object to the new freelist */
1587 * Adjust the reconstructed freelist depth
1661 /* Initialize each random sequence freelist per cache */
1674 /* Get the next entry on the pre-computed randomized freelist */
1696 /* Shuffle the singly linked freelist based on a random pre-computed sequence */
1713 /* First entry is used as the base of the freelist */
1717 page->freelist = cur;
1798 page->freelist = start;
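
Matches 1661-1798 relate to freelist randomization: a fresh slab's objects are linked in the order given by a pre-computed random permutation rather than in address order. A simplified sketch, assuming the free pointer lives at the start of each object (in the kernel it sits at s->offset); shuffle_freelist_model and random_seq are illustrative names:

#include <stddef.h>

/* Link the objects of a fresh slab in the order given by a pre-computed
 * random permutation of slot indices and return the new freelist head. */
static void *shuffle_freelist_model(char *slab_base, size_t obj_size,
                                    const unsigned int *random_seq,
                                    unsigned int nr_objects)
{
        void *start, *cur;
        unsigned int i;

        /* First entry of the sequence is used as the base of the freelist. */
        start = cur = slab_base + (size_t)random_seq[0] * obj_size;

        for (i = 1; i < nr_objects; i++) {
                void *next = slab_base + (size_t)random_seq[i] * obj_size;

                *(void **)cur = next;           /* chain to the next slot */
                cur = next;
        }
        *(void **)cur = NULL;                   /* terminate the list      */
        return start;                           /* becomes page->freelist  */
}
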
1906 * return the pointer to the freelist.
1914 void *freelist;
1921 * Zap the freelist and set the frozen bit.
1922 * The old freelist is the list of objects for the
1925 freelist = page->freelist;
1931 new.freelist = NULL;
1933 new.freelist = freelist;
1940 freelist, counters,
1941 new.freelist, new.counters,
1946 WARN_ON(!freelist);
1947 return freelist;
2158 void *freelist, struct kmem_cache_cpu *c)
2169 if (page->freelist) {
2176 * to the page freelist while it is still frozen. Leave the
2182 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2188 * 'freelist' is already corrupted. So isolate all objects
2189 * starting at 'freelist'.
2191 if (freelist_corrupted(s, page, &freelist, nextfree))
2195 prior = page->freelist;
2197 set_freepointer(s, freelist, prior);
2204 freelist, new.counters,
2205 "drain percpu freelist"));
2207 freelist = nextfree;
2226 old.freelist = page->freelist;
2232 if (freelist) {
2234 set_freepointer(s, freelist, old.freelist);
2235 new.freelist = freelist;
2237 new.freelist = old.freelist;
2243 else if (new.freelist) {
2283 old.freelist, old.counters,
2284 new.freelist, new.counters,
2302 c->freelist = NULL;
2337 old.freelist = page->freelist;
2342 new.freelist = old.freelist;
2347 old.freelist, old.counters,
2348 new.freelist, new.counters,
2436 deactivate_slab(s, c->page, c->freelist, c);
2574 void *freelist;
2580 freelist = get_partial(s, flags, node, c);
2582 if (freelist)
2583 return freelist;
2595 freelist = page->freelist;
2596 page->freelist = NULL;
2603 return freelist;
2615 * Check the page->freelist of a page and either transfer the freelist to the
2616 * per cpu freelist or deactivate the page.
2628 void *freelist;
2631 freelist = page->freelist;
2638 new.frozen = freelist != NULL;
2641 freelist, counters,
2645 return freelist;
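
The comment at 2615-2616 describes the "transfer or deactivate" decision: either hand the whole page freelist over to the per-cpu freelist or mark the slab for deactivation. A model of that decision with the locking and atomics omitted (struct slab_model and take_page_freelist are illustrative names):

#include <stddef.h>

struct slab_model {
        void *freelist;
        int frozen;
};

/* Detach whatever is on the shared page freelist in one step; if nothing
 * was there, the caller deactivates the slab instead of keeping it frozen. */
static void *take_page_freelist(struct slab_model *page)
{
        void *freelist = page->freelist;

        page->freelist = NULL;                  /* cpu slab now owns the objects */
        page->frozen = (freelist != NULL);      /* empty => deactivate the slab  */
        return freelist;
}
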
2649 * Slow path. The lockless freelist is empty or we need to perform
2653 * regular freelist. In that case we simply take over the regular freelist
2654 * as the lockless freelist and zap the regular freelist.
2657 * first element of the freelist as the object to allocate now and move the
2658 * rest of the freelist to the lockless freelist.
2670 void *freelist;
2698 deactivate_slab(s, page, c->freelist, c);
2709 deactivate_slab(s, page, c->freelist, c);
2713 /* must check again c->freelist in case of cpu migration or IRQ */
2714 freelist = c->freelist;
2715 if (freelist)
2718 freelist = get_freelist(s, page);
2720 if (!freelist) {
2731 * freelist is pointing to the list of objects to be used.
2736 c->freelist = get_freepointer(s, freelist);
2738 return freelist;
2749 freelist = new_slab_objects(s, gfpflags, node, &c);
2751 if (unlikely(!freelist)) {
2762 !alloc_debug_processing(s, page, freelist, addr))
2765 deactivate_slab(s, page, get_freepointer(s, freelist), c);
2766 return freelist;
2796 * zeroing out freelist pointer.
2810 * The fastpath works by first checking if the lockless freelist can be used.
2861 object = c->freelist;
2875 * 2. Verify that tid and freelist have not been changed
2876 * 3. If they were not changed replace tid and freelist
2883 s->cpu_slab->freelist, s->cpu_slab->tid,
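
Matches 2810-2883 describe the allocation fastpath: read the lockless freelist and the transaction id, then commit the new head only if neither changed in the meantime. A non-atomic userspace model of that check (the kernel uses this_cpu_cmpxchg_double(); struct cpu_slab_model and fastpath_alloc are illustrative names):

#include <stddef.h>

struct cpu_slab_model {
        void *freelist;              /* next free object, or NULL          */
        unsigned long tid;           /* transaction id, bumped on every op */
};

static void *fastpath_alloc(struct cpu_slab_model *c)
{
        void *object = c->freelist;
        unsigned long tid = c->tid;

        if (!object)
                return NULL;                     /* go to the slow path */

        /* Stand-in for this_cpu_cmpxchg_double(): commit only if neither
         * the freelist head nor the tid changed under us. */
        if (c->freelist == object && c->tid == tid) {
                c->freelist = *(void **)object;  /* next free object */
                c->tid = tid + 1;
                return object;
        }
        return NULL;                             /* raced: caller retries */
}

Including the tid in the comparison is what lets the real code detect preemption or CPU migration between the read and the commit.
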
2992 prior = page->freelist;
3094 * Bulk free of a freelist with several objects (all pointing to the
3126 void **freelist = READ_ONCE(c->freelist);
3128 set_freepointer(s, tail_obj, freelist);
3131 s->cpu_slab->freelist, s->cpu_slab->tid,
3132 freelist, tid,
3149 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3176 void *freelist;
3184 * page. It builds a detached freelist directly within the given
3187 * The freelist is built up as a singly linked list in the objects.
3188 * The idea is that this detached freelist can then be bulk
3189 * transferred to the real freelist(s), but only requiring a single
3229 /* Start new detached freelist */
3233 df->freelist = object;
3244 /* Opportunistically build freelist */
3245 set_freepointer(df->s, object, df->freelist);
3246 df->freelist = object;
3278 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
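
Matches 3176-3278 belong to the detached-freelist bulk free: objects from the caller's array that sit on the same slab are linked through their own memory and counted, so they can later be spliced onto that slab's real freelist with a single update. A simplified model, assuming the free pointer is stored at the start of each object and slab_of() stands in for the page lookup (all names here are illustrative):

#include <stddef.h>

struct detached_freelist_model {
        void *slab;          /* slab all chained objects belong to */
        void *freelist;      /* head of the chain                  */
        void *tail;          /* last object in the chain           */
        size_t cnt;          /* number of chained objects          */
};

/* Chain every object in p[] that shares the last object's slab; compact the
 * remaining objects to the front of p[] and return how many are left. */
static size_t build_chain(void **p, size_t nr,
                          struct detached_freelist_model *df,
                          void *(*slab_of)(void *obj))
{
        size_t i, remaining = 0;

        df->slab = slab_of(p[nr - 1]);    /* anchor on the last object's slab */
        df->freelist = df->tail = p[nr - 1];
        *(void **)df->tail = NULL;
        df->cnt = 1;

        for (i = 0; i + 1 < nr; i++) {
                if (slab_of(p[i]) == df->slab) {
                        *(void **)p[i] = df->freelist;   /* push onto the chain    */
                        df->freelist = p[i];
                        df->cnt++;
                } else {
                        p[remaining++] = p[i];           /* leave for the next pass */
                }
        }
        return remaining;
}
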
3304 void *object = c->freelist;
3308 * We may have removed an object from c->freelist using
3318 * of re-populating per CPU c->freelist
3330 c->freelist = get_freepointer(s, object);
3540 n = page->freelist;
3548 page->freelist = get_freepointer(kmem_cache_node, n);
3713 * Store freelist pointer near middle of object to keep
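
Match 3713 is from the comment explaining why the free pointer is stored near the middle of the object rather than at its start. A one-function sketch of that placement, assuming the offset is just the word-aligned midpoint (the real calculation also has to account for debugging, constructors and RCU-freed caches):

#include <stddef.h>

/* Word-aligned midpoint of the object: a small overflow from the neighbouring
 * object is then less likely to clobber the freelist pointer. */
static size_t freepointer_offset(size_t object_size)
{
        return (object_size / 2) & ~(sizeof(void *) - 1);
}
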
3828 /* Initialize the pre-computed randomized freelist if slab is up */
4533 /* Now we know that a valid freelist exists */