Lines Matching defs:page

26  * page long) and always contiguous), and each slab contains multiple
125 #include <asm/page.h>
220 struct kmem_cache_node *n, struct page *page,
374 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
377 return page->s_mem + cache->size * idx;
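
The index_to_obj() lines above (374-377) are the core address arithmetic of this allocator: every object in a slab sits at a fixed stride from page->s_mem. Below is a minimal userspace sketch of that mapping and its inverse (what obj_to_index() computes), using hypothetical toy_cache/toy_slab types and ignoring alignment and debug padding; it is an illustration, not the kernel code.

    #include <assert.h>
    #include <stddef.h>

    struct toy_cache { size_t size; };      /* stands in for cache->size */
    struct toy_slab  { char *s_mem; };      /* stands in for page->s_mem */

    /* Mirrors index_to_obj(): object idx starts size*idx bytes past s_mem. */
    static void *toy_index_to_obj(const struct toy_cache *c,
                                  const struct toy_slab *s, unsigned int idx)
    {
        return s->s_mem + c->size * idx;
    }

    /* Inverts the same linear mapping, as obj_to_index() does. */
    static unsigned int toy_obj_to_index(const struct toy_cache *c,
                                         const struct toy_slab *s, const void *objp)
    {
        return (unsigned int)(((const char *)objp - s->s_mem) / c->size);
    }

    int main(void)
    {
        static char backing[4096];
        struct toy_cache c = { .size = 128 };
        struct toy_slab  s = { .s_mem = backing };

        void *obj5 = toy_index_to_obj(&c, &s, 5);
        assert(toy_obj_to_index(&c, &s, obj5) == 5);
        return 0;
    }
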
415 * freelist will be at the end of slab page. The objects will be
552 struct page *page, void *objp)
558 page_node = page_to_nid(page);
1199 * Initialisation. Called after the page allocator have been initialised and
1216 * page orders on machines with more than 32MB of memory if
1316 * Register the timers that return unneeded pages to the page allocator
1362 * Interface to system's page allocator. No need to hold the
1369 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1372 struct page *page;
1376 page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1377 if (!page) {
1382 account_slab_page(page, cachep->gfporder, cachep);
1383 __SetPageSlab(page);
1385 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1386 SetPageSlabPfmemalloc(page);
1388 return page;
1392 * Interface to system's page release.
1394 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1398 BUG_ON(!PageSlab(page));
1399 __ClearPageSlabPfmemalloc(page);
1400 __ClearPageSlab(page);
1401 page_mapcount_reset(page);
1402 page->mapping = NULL;
1406 unaccount_slab_page(page, order, cachep);
1407 __free_pages(page, order);
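
kmem_getpages() (1369-1388) and kmem_freepages() (1394-1407) bracket the life of a slab's backing pages: allocate an order-gfporder block from the page allocator, account it and mark it PageSlab (plus SlabPfmemalloc when the allocation dipped into reserves), and on teardown assert and clear those marks before returning the pages. A rough userspace model of that symmetric setup/teardown follows, with a hypothetical toy_page struct standing in for the struct page state being flipped.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define TOY_PAGE_SIZE 4096u

    struct toy_page {
        void *mem;              /* backing memory, PAGE_SIZE << order bytes */
        unsigned int order;     /* cachep->gfporder                         */
        bool slab;              /* models PageSlab                          */
        bool pfmemalloc;        /* models PageSlabPfmemalloc                */
    };

    static bool toy_getpages(struct toy_page *p, unsigned int order, bool pfmemalloc)
    {
        p->mem = malloc((size_t)TOY_PAGE_SIZE << order);
        if (!p->mem)
            return false;           /* kmem_getpages() returns NULL here */
        p->order = order;
        p->slab = true;             /* __SetPageSlab()                   */
        p->pfmemalloc = pfmemalloc; /* SetPageSlabPfmemalloc()           */
        return true;
    }

    static void toy_freepages(struct toy_page *p)
    {
        assert(p->slab);            /* BUG_ON(!PageSlab(page))           */
        p->pfmemalloc = false;      /* __ClearPageSlabPfmemalloc()       */
        p->slab = false;            /* __ClearPageSlab()                 */
        free(p->mem);               /* __free_pages()                    */
        p->mem = NULL;
    }

    int main(void)
    {
        struct toy_page p;
        if (toy_getpages(&p, 1, false))
            toy_freepages(&p);
        return 0;
    }
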
1413 struct page *page;
1415 page = container_of(head, struct page, rcu_head);
1416 cachep = page->slab_cache;
1418 kmem_freepages(cachep, page);
1554 struct page *page = virt_to_head_page(objp);
1557 objnr = obj_to_index(cachep, page, objp);
1559 objp = index_to_obj(cachep, page, objnr - 1);
1565 objp = index_to_obj(cachep, page, objnr + 1);
1576 struct page *page)
1581 poison_obj(cachep, page->freelist - obj_offset(cachep),
1586 void *objp = index_to_obj(cachep, page, i);
1602 struct page *page)
1610 * @page: page pointer being destroyed
1612 * Destroy all the objs in a slab page, and release the mem back to the system.
1613 * Before calling the slab page must have been unlinked from the cache. The
1616 static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1620 freelist = page->freelist;
1621 slab_destroy_debugcheck(cachep, page);
1623 call_rcu(&page->rcu_head, kmem_rcu_free);
1625 kmem_freepages(cachep, page);
1629 * although actual page can be freed in rcu context
1641 struct page *page, *n;
1643 list_for_each_entry_safe(page, n, list, slab_list) {
1644 list_del(&page->slab_list);
1645 slab_destroy(cachep, page);
1650 * calculate_slab_order - calculate size (page order) of slabs
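
calculate_slab_order() (1650) picks the page order for a cache: large enough that a useful number of objects fits per slab, small enough that the wasted tail and the pressure on the page allocator stay acceptable. The sketch below shows only the basic arithmetic of that trade-off; the 1/8-waste threshold and the order cap are invented for illustration, and the real function also accounts for freelist management overhead and off-slab freelists.

    #include <stddef.h>
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096u
    #define TOY_MAX_ORDER 3u        /* hypothetical cap, not slab_max_order */

    /* Smallest order whose leftover space is at most 1/8 of the slab. */
    static unsigned int toy_slab_order(size_t obj_size)
    {
        for (unsigned int order = 0; order <= TOY_MAX_ORDER; order++) {
            size_t slab = (size_t)TOY_PAGE_SIZE << order;
            size_t num  = slab / obj_size;           /* objects per slab  */
            size_t left = slab - num * obj_size;     /* wasted tail bytes */

            if (num && left * 8 <= slab)
                return order;
        }
        return TOY_MAX_ORDER;
    }

    int main(void)
    {
        printf("order for 700-byte objects: %u\n", toy_slab_order(700));
        return 0;
    }
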
2195 struct page *page;
2207 page = list_entry(p, struct page, slab_list);
2208 list_del(&page->slab_list);
2217 slab_destroy(cache, page);
2292 struct page *page, int colour_off,
2296 void *addr = page_address(page);
2298 page->s_mem = addr + colour_off;
2299 page->active = 0;
2316 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2318 return ((freelist_idx_t *)page->freelist)[idx];
2321 static inline void set_free_obj(struct page *page,
2324 ((freelist_idx_t *)(page->freelist))[idx] = val;
2327 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2333 void *objp = index_to_obj(cachep, page, i);
2417 static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2419 swap(((freelist_idx_t *)page->freelist)[a],
2420 ((freelist_idx_t *)page->freelist)[b]);
2427 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2444 page->freelist = index_to_obj(cachep, page, objfreelist) +
2455 set_free_obj(page, i, i);
2461 swap_free_obj(page, i, rand);
2465 set_free_obj(page, i, next_random_slot(&state));
2469 set_free_obj(page, cachep->num - 1, objfreelist);
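
Lines 2417-2469 are the freelist randomization path: the per-slab freelist is an array of object indices, and shuffle_freelist() either copies a precomputed random sequence or Fisher-Yates-shuffles the identity permutation with swap_free_obj(). Here is a userspace sketch of the swap-based variant, using rand() where the kernel uses its own per-cache random state and assuming the byte-sized freelist_idx_t case.

    #include <stdlib.h>

    typedef unsigned char freelist_idx_t;   /* the small-slab index type */

    /* Build 0..num-1, then shuffle in place (the swap_free_obj() loop). */
    static void toy_shuffle_freelist(freelist_idx_t *freelist, unsigned int num)
    {
        for (unsigned int i = 0; i < num; i++)
            freelist[i] = (freelist_idx_t)i;         /* set_free_obj(page, i, i) */

        if (num < 2)
            return;

        /* Fisher-Yates from the top index down, as the kernel loop does. */
        for (unsigned int i = num - 1; i > 0; i--) {
            unsigned int r = (unsigned int)rand() % (i + 1);
            freelist_idx_t tmp = freelist[i];        /* swap_free_obj(page, i, r) */
            freelist[i] = freelist[r];
            freelist[r] = tmp;
        }
    }

    int main(void)
    {
        freelist_idx_t fl[16];
        toy_shuffle_freelist(fl, 16);
        return 0;
    }
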
2475 struct page *page)
2482 struct page *page)
2488 cache_init_objs_debug(cachep, page);
2491 shuffled = shuffle_freelist(cachep, page);
2494 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2499 objp = index_to_obj(cachep, page, i);
2510 set_free_obj(page, i, i);
2514 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2518 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2519 page->active++;
2525 struct page *page, void *objp)
2527 unsigned int objnr = obj_to_index(cachep, page, objp);
2532 for (i = page->active; i < cachep->num; i++) {
2533 if (get_free_obj(page, i) == objnr) {
2540 page->active--;
2541 if (!page->freelist)
2542 page->freelist = objp + obj_offset(cachep);
2544 set_free_obj(page, page->active, objnr);
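
slab_get_obj() and slab_put_obj() (2514-2544) treat the freelist index array plus page->active as a stack of free object numbers: allocation hands out the index stored at position active and bumps the counter, freeing decrements it and stores the index back, and the DEBUG build scans the still-free entries to catch double frees. Below is a compact userspace model of that stack discipline, with a hypothetical toy_slab carrying the per-slab fields.

    #include <assert.h>

    typedef unsigned char freelist_idx_t;

    struct toy_slab {
        freelist_idx_t freelist[16];    /* contents of page->freelist */
        unsigned int   active;          /* page->active               */
        unsigned int   num;             /* cachep->num                */
    };

    /* slab_get_obj(): pop the next free index off the implicit stack. */
    static unsigned int toy_get_obj(struct toy_slab *s)
    {
        assert(s->active < s->num);
        return s->freelist[s->active++];
    }

    /* slab_put_obj(): push an index back, scanning the free region for
     * double frees the way the loop at 2532-2533 does. */
    static void toy_put_obj(struct toy_slab *s, unsigned int objnr)
    {
        for (unsigned int i = s->active; i < s->num; i++)
            assert(s->freelist[i] != objnr);
        s->freelist[--s->active] = (freelist_idx_t)objnr;
    }

    int main(void)
    {
        struct toy_slab s = { .active = 0, .num = 16 };

        for (unsigned int i = 0; i < s.num; i++)
            s.freelist[i] = (freelist_idx_t)i;

        unsigned int a = toy_get_obj(&s);
        unsigned int b = toy_get_obj(&s);
        toy_put_obj(&s, a);
        toy_put_obj(&s, b);
        assert(s.active == 0);
        return 0;
    }
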
2552 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2555 page->slab_cache = cache;
2556 page->freelist = freelist;
2563 static struct page *cache_grow_begin(struct kmem_cache *cachep,
2571 struct page *page;
2588 * Get mem for the objs. Attempt to allocate a physical page from
2591 page = kmem_getpages(cachep, local_flags, nodeid);
2592 if (!page)
2595 page_node = page_to_nid(page);
2614 kasan_poison_slab(page);
2617 freelist = alloc_slabmgmt(cachep, page, offset,
2622 slab_map_pages(cachep, page, freelist);
2624 cache_init_objs(cachep, page);
2629 return page;
2632 kmem_freepages(cachep, page);
2639 static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2646 if (!page)
2649 INIT_LIST_HEAD(&page->slab_list);
2650 n = get_node(cachep, page_to_nid(page));
2654 if (!page->active) {
2655 list_add_tail(&page->slab_list, &n->slabs_free);
2658 fixup_slab_list(cachep, n, page, &list);
2661 n->free_objects += cachep->num - page->active;
2709 struct page *page;
2715 page = virt_to_head_page(objp);
2725 objnr = obj_to_index(cachep, page, objp);
2728 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2758 struct kmem_cache_node *n, struct page *page,
2762 list_del(&page->slab_list);
2763 if (page->active == cachep->num) {
2764 list_add(&page->slab_list, &n->slabs_full);
2769 void **objp = page->freelist;
2775 page->freelist = NULL;
2778 list_add(&page->slab_list, &n->slabs_partial);
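
fixup_slab_list() (2758-2778) re-files a slab after objects were taken from it: fully used slabs move to slabs_full (and, for caches that keep the freelist inside an object, give up that freelist storage, which is what the NULLing at 2775 is about), anything else goes back to slabs_partial, and cache_grow_end() (2639-2661) had earlier parked untouched slabs on slabs_free. A small sketch of that three-list bookkeeping, reduced to an enum decision instead of the kernel's list_head moves:

    enum toy_list { TOY_FREE, TOY_PARTIAL, TOY_FULL };

    /* Which list a slab belongs on, given its active object count:
     * active == 0   -> slabs_free    (cache_grow_end)
     * active == num -> slabs_full    (fixup_slab_list)
     * otherwise     -> slabs_partial
     */
    static enum toy_list toy_file_slab(unsigned int active, unsigned int num)
    {
        if (active == 0)
            return TOY_FREE;
        if (active == num)
            return TOY_FULL;
        return TOY_PARTIAL;
    }

    int main(void)
    {
        return toy_file_slab(3, 16) == TOY_PARTIAL ? 0 : 1;
    }
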
2782 static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2783 struct page *page, bool pfmemalloc)
2785 if (!page)
2789 return page;
2791 if (!PageSlabPfmemalloc(page))
2792 return page;
2796 ClearPageSlabPfmemalloc(page);
2797 return page;
2801 list_del(&page->slab_list);
2802 if (!page->active) {
2803 list_add_tail(&page->slab_list, &n->slabs_free);
2806 list_add_tail(&page->slab_list, &n->slabs_partial);
2808 list_for_each_entry(page, &n->slabs_partial, slab_list) {
2809 if (!PageSlabPfmemalloc(page))
2810 return page;
2814 list_for_each_entry(page, &n->slabs_free, slab_list) {
2815 if (!PageSlabPfmemalloc(page)) {
2817 return page;
2824 static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2826 struct page *page;
2829 page = list_first_entry_or_null(&n->slabs_partial, struct page,
2831 if (!page) {
2833 page = list_first_entry_or_null(&n->slabs_free, struct page,
2835 if (page)
2840 page = get_valid_first_slab(n, page, pfmemalloc);
2842 return page;
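
get_first_slab() (2824-2842) prefers a partially used slab and only then takes one from slabs_free, while get_valid_first_slab() (2782-2817) refuses pfmemalloc-reserved slabs for ordinary requests and hunts both lists for a non-pfmemalloc one instead. The simplified selection sketch below works over plain arrays and assumes hypothetical toy_slab entries that carry only the pfmemalloc mark; the real code also re-files the rejected slab.

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_slab {
        bool pfmemalloc;        /* models PageSlabPfmemalloc */
    };

    /* First slab the caller may use, or NULL: a pfmemalloc-entitled caller
     * may take anything, everyone else must skip pfmemalloc slabs. */
    static struct toy_slab *toy_pick_slab(struct toy_slab *partial, size_t npartial,
                                          struct toy_slab *freeslabs, size_t nfree,
                                          bool caller_pfmemalloc)
    {
        for (size_t i = 0; i < npartial; i++)
            if (caller_pfmemalloc || !partial[i].pfmemalloc)
                return &partial[i];
        for (size_t i = 0; i < nfree; i++)
            if (caller_pfmemalloc || !freeslabs[i].pfmemalloc)
                return &freeslabs[i];
        return NULL;
    }

    int main(void)
    {
        struct toy_slab partial[2]   = { { .pfmemalloc = true }, { .pfmemalloc = false } };
        struct toy_slab freeslabs[1] = { { .pfmemalloc = false } };

        return toy_pick_slab(partial, 2, freeslabs, 1, false) == &partial[1] ? 0 : 1;
    }
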
2848 struct page *page;
2856 page = get_first_slab(n, true);
2857 if (!page) {
2862 obj = slab_get_obj(cachep, page);
2865 fixup_slab_list(cachep, n, page, &list);
2878 struct array_cache *ac, struct page *page, int batchcount)
2884 BUG_ON(page->active >= cachep->num);
2886 while (page->active < cachep->num && batchcount--) {
2891 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2904 struct page *page;
2937 page = get_first_slab(n, false);
2938 if (!page)
2943 batchcount = alloc_block(cachep, ac, page, batchcount);
2944 fixup_slab_list(cachep, n, page, &list);
2963 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2970 if (!ac->avail && page)
2971 alloc_block(cachep, ac, page, batchcount);
2972 cache_grow_end(cachep, page);
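
cache_alloc_refill() and its helper alloc_block() (2878-2972) bulk-move objects from a slab into the per-CPU array_cache: keep popping indices while the slab still has free objects and batchcount allows, then re-file the slab and, if nothing was found at all, grow the cache with cache_grow_begin()/cache_grow_end(). A userspace sketch of just the inner refill loop, with hypothetical toy_* types in place of array_cache and struct page:

    struct toy_array_cache {
        unsigned int avail;     /* ac->avail                   */
        unsigned int limit;     /* ac->limit, not checked here */
        unsigned int entry[32]; /* handles instead of pointers */
    };

    struct toy_slab {
        unsigned int active;
        unsigned int num;
        unsigned int freelist[16];
    };

    /* Mirrors alloc_block(): drain the slab into ac until either side runs out. */
    static int toy_alloc_block(struct toy_array_cache *ac, struct toy_slab *s,
                               int batchcount)
    {
        while (s->active < s->num && batchcount-- > 0)
            ac->entry[ac->avail++] = s->freelist[s->active++];
        return batchcount;
    }

    int main(void)
    {
        struct toy_array_cache ac = { .avail = 0, .limit = 32 };
        struct toy_slab s = { .active = 0, .num = 16 };

        for (unsigned int i = 0; i < s.num; i++)
            s.freelist[i] = i;

        toy_alloc_block(&ac, &s, 8);
        return ac.avail == 8 ? 0 : 1;
    }
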
3091 * perform an allocation without specifying a node. This allows the page
3102 struct page *page;
3138 page = cache_grow_begin(cache, flags, numa_mem_id());
3139 cache_grow_end(cache, page);
3140 if (page) {
3141 nid = page_to_nid(page);
3165 struct page *page;
3176 page = get_first_slab(n, false);
3177 if (!page)
3186 BUG_ON(page->active == cachep->num);
3188 obj = slab_get_obj(cachep, page);
3191 fixup_slab_list(cachep, n, page, &list);
3199 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3200 if (page) {
3202 obj = slab_get_obj(cachep, page);
3204 cache_grow_end(cachep, page);
3326 struct page *page;
3332 struct page *page;
3336 page = virt_to_head_page(objp);
3337 list_del(&page->slab_list);
3339 slab_put_obj(cachep, page, objp);
3343 if (page->active == 0) {
3344 list_add(&page->slab_list, &n->slabs_free);
3351 list_add_tail(&page->slab_list, &n->slabs_partial);
3358 page = list_last_entry(&n->slabs_free, struct page, slab_list);
3359 list_move(&page->slab_list, list);
3395 struct page *page;
3397 list_for_each_entry(page, &n->slabs_free, slab_list) {
3398 BUG_ON(page->active);
3445 * is per page memory reference) to get nodeid. Instead use a global
3460 struct page *page = virt_to_head_page(objp);
3462 if (unlikely(PageSlabPfmemalloc(page))) {
3463 cache_free_pfmemalloc(cachep, page, objp);
4138 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4148 cachep = page->slab_cache;
4149 objnr = obj_to_index(cachep, page, (void *)ptr);
4153 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
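
__check_heap_object() (4138-4153) is the hardened-usercopy hook: from a pointer it recovers the owning cache and object index, then the offset of the pointer inside that object, so copies that stray outside the cache's whitelisted region can be rejected. The userspace model below shows that offset computation and a bounds check, with hypothetical useroffset/usersize fields standing in for the cache's usercopy window; the real function derives the cache from page->slab_cache (4148) and reports violations rather than returning a bool, which this sketch simplifies.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct toy_cache {
        size_t size;            /* cachep->size                           */
        size_t obj_offset;      /* obj_offset(cachep), debug head room    */
        size_t useroffset;      /* start of the usercopy-whitelisted area */
        size_t usersize;        /* length of that area                    */
    };

    /* Does copying n bytes starting at ptr stay inside one object's
     * whitelisted region?  s_mem is where the slab's objects start. */
    static bool toy_check_heap_object(const struct toy_cache *c, const char *s_mem,
                                      const char *ptr, size_t n)
    {
        size_t objnr  = (size_t)(ptr - s_mem) / c->size;     /* obj_to_index() */
        size_t offset = (size_t)(ptr - s_mem) - objnr * c->size - c->obj_offset;

        return offset >= c->useroffset &&
               offset - c->useroffset + n <= c->usersize;
    }

    int main(void)
    {
        struct toy_cache c = { .size = 256, .obj_offset = 0,
                               .useroffset = 16, .usersize = 64 };
        static char slab[4096];

        assert(toy_check_heap_object(&c, slab, slab + 256 + 16, 64));
        assert(!toy_check_heap_object(&c, slab, slab + 256 + 16, 65));
        return 0;
    }
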