Lines Matching defs:node

4  * objects in per cpu and per node lists.
46 * 2. node->list_lock
68 * The list_lock protects the partial and full list on each node and
153 * - Variable sizing of the per node arrays
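The excerpts above are from the locking description at the top of the SLUB allocator (mm/slub.c): each per-node structure, struct kmem_cache_node, carries a list_lock that protects that node's partial (and, with debugging enabled, full) list. As a hedged illustration of the pattern those comments describe, the hypothetical helper below (example_add_partial is not a real slub.c function; the real add_partial()/remove_partial() helpers expect list_lock to already be held by the caller) shows how a node's partial list is touched under its list_lock:

/*
 * Hypothetical illustration only: a node's partial list and nr_partial
 * counter are always manipulated under that node's list_lock.
 */
static void example_add_partial(struct kmem_cache_node *n, struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	list_add_tail(&page->slab_list, &n->partial);
	n->nr_partial++;
	spin_unlock_irqrestore(&n->list_lock, flags);
}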
1069 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1071 struct kmem_cache_node *n = get_node(s, node);
1081 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1083 struct kmem_cache_node *n = get_node(s, node);
1096 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1098 struct kmem_cache_node *n = get_node(s, node);
1484 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1488 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1490 static inline void dec_slabs_node(struct kmem_cache *s, int node,
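Lines 1069-1098 are the CONFIG_SLUB_DEBUG per-node slab counters; lines 1484-1490 are the corresponding no-op stubs when debugging is compiled out. A condensed sketch of the debug variants, reconstructed from the matched lines (the nr_slabs/total_objects field names follow struct kmem_cache_node; details may differ between kernel versions):

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/* n may still be NULL during bootstrap (see early_kmem_cache_node_alloc) */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}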
1617 gfp_t flags, int node, struct kmem_cache_order_objects oo)
1622 if (node == NUMA_NO_NODE)
1625 page = __alloc_pages_node(node, flags, order);
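Line 1617 onward is alloc_slab_page(): NUMA_NO_NODE means "no preference" and goes to the plain page allocator, anything else is passed to __alloc_pages_node(). Roughly as below, omitting memcg/accounting details, so treat this as a sketch rather than the exact function:

static inline struct page *alloc_slab_page(struct kmem_cache *s,
		gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
	struct page *page;
	unsigned int order = oo_order(oo);

	if (node == NUMA_NO_NODE)
		page = alloc_pages(flags, order);
	else
		page = __alloc_pages_node(node, flags, order);

	return page;
}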
1742 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1766 page = alloc_slab_page(s, alloc_gfp, node, oo);
1774 page = alloc_slab_page(s, alloc_gfp, node, oo);
1822 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1828 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
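allocate_slab() at 1742 calls alloc_slab_page() twice: first optimistically at the cache's preferred order with __GFP_NOWARN | __GFP_NORETRY, then, if that fails (typically due to fragmentation), again at the minimum order. new_slab() at 1822 is a thin wrapper that masks the gfp flags down to GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK before calling it. A hypothetical, condensed sketch of that fallback (example_allocate_slab is not the real function name and the real code also updates ORDER_FALLBACK statistics):

/*
 * Hypothetical condensed sketch of allocate_slab()'s order fallback:
 * try the preferred order quietly, then retry at s->min.
 */
static struct page *example_allocate_slab(struct kmem_cache *s,
		gfp_t flags, int node)
{
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	struct page *page;

	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		/* Higher order failed -- fall back to the minimum viable order */
		oo = s->min;
		page = alloc_slab_page(s, flags, node, oo);
	}

	return page;
}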
1954 * Try to allocate a partial slab from a specific node.
2018 * inter node defragmentation and node local allocations. A lower
2023 * returns node local objects. If the ratio is higher, then kmalloc()
2024 * may return off node objects because partial slabs are obtained
2069 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
2073 int searchnode = node;
2075 if (node == NUMA_NO_NODE)
2079 if (object || node != NUMA_NO_NODE)
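get_partial() at 2069 ties the remote_node_defrag_ratio comment above (lines 2018-2024) to the actual fallback policy: search the requested node's partial list first (or the local node when NUMA_NO_NODE was asked for), and only consider other nodes when the caller did not pin a specific node. Roughly, as a sketch reconstructed from the matched lines (the helper signatures are approximate and vary by version):

static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
			 struct kmem_cache_cpu *c)
{
	void *object;
	int searchnode = node;

	if (node == NUMA_NO_NODE)
		searchnode = numa_mem_id();

	object = get_partial_node(s, get_node(s, searchnode), c, flags);
	if (object || node != NUMA_NO_NODE)
		return object;

	/* get_any_partial() is where remote_node_defrag_ratio is consulted */
	return get_any_partial(s, flags, c);
}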
2379 * per node partial list.
2401 * set to the per node partial list.
2497 static inline int node_match(struct page *page, int node)
2500 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
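node_match() at 2497 is what both the fast path (line 2863) and the slow path (line 2688) use to decide whether the current cpu slab satisfies the requested node. The matched lines are nearly the whole function; reconstructed below (the CONFIG_NUMA guard is an assumption from memory and may differ slightly by version):

static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
		return 0;
#endif
	return 1;
}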
2540 int node;
2546 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2556 for_each_kmem_cache_node(s, node, n) {
2565 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
2566 node, nr_slabs, nr_objs, nr_free);
2572 int node, struct kmem_cache_cpu **pc)
2580 freelist = get_partial(s, flags, node, c);
2585 page = new_slab(s, flags, node);
2667 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2678 * if the node is not online or has no normal memory, just
2679 * ignore the node constraint
2681 if (unlikely(node != NUMA_NO_NODE &&
2682 !node_state(node, N_NORMAL_MEMORY)))
2683 node = NUMA_NO_NODE;
2688 if (unlikely(!node_match(page, node))) {
2691 * implies node != NUMA_NO_NODE
2693 if (!node_state(node, N_NORMAL_MEMORY)) {
2694 node = NUMA_NO_NODE;
2749 freelist = new_slab_objects(s, gfpflags, node, &c);
2752 slab_out_of_memory(s, gfpflags, node);
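The ___slab_alloc() hits at 2667-2694 are the slow path's handling of an unsatisfiable node constraint: if the requested node is offline or has no normal memory, or the cpu slab fails node_match() and the node turns out to be unusable, the constraint is relaxed to NUMA_NO_NODE rather than failing the allocation; only when even a fresh slab cannot be obtained (2749-2752) does slab_out_of_memory() print the per-node diagnostics shown around 2546-2566. A hypothetical condensed helper expressing just that decision (relax_node_constraint is not a real slub.c function):

/*
 * Hypothetical helper (not in slub.c): decide whether a node constraint
 * can still be honoured on the slow path, mirroring the checks quoted
 * above from ___slab_alloc().
 */
static inline int relax_node_constraint(int node)
{
	if (node != NUMA_NO_NODE && !node_state(node, N_NORMAL_MEMORY))
		return NUMA_NO_NODE;	/* node offline or no normal memory */
	return node;
}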
2773 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2789 p = ___slab_alloc(s, gfpflags, node, addr, c);
2816 gfp_t gfpflags, int node, unsigned long addr)
2863 if (unlikely(!object || !page || !node_match(page, node))) {
2864 object = __slab_alloc(s, gfpflags, node, addr, c);
2933 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2935 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2938 s->object_size, s->size, gfpflags, node);
2947 int node, size_t size)
2949 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2952 size, s->size, gfpflags, node);
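kmem_cache_alloc_node() at 2933 and the _trace variant at 2947 are thin wrappers over slab_alloc_node() plus a tracepoint; the matched lines give the tracepoint arguments. Reconstructed as a sketch (EXPORT_SYMBOL placement and exact tracepoint names may differ by version):

void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    s->object_size, s->size, gfpflags, node);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);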
3518 * slab on the node for this slabcache. There are no concurrent accesses
3523 * memory on a fresh node that has no slab structures yet.
3525 static void early_kmem_cache_node_alloc(int node)
3532 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3535 if (page_to_nid(page) != node) {
3536 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3537 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3551 kmem_cache_node->node[node] = n;
3553 inc_slabs_node(kmem_cache_node, node, page->objects);
3564 int node;
3567 for_each_kmem_cache_node(s, node, n) {
3568 s->node[node] = NULL;
3582 int node;
3584 for_each_node_state(node, N_NORMAL_MEMORY) {
3588 early_kmem_cache_node_alloc(node);
3592 GFP_KERNEL, node);
3600 s->node[node] = n;
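Lines 3564-3600 cover free_kmem_cache_nodes(), which clears each s->node[node] pointer, and init_kmem_cache_nodes(), which gives every node with normal memory a kmem_cache_node: allocated from the kmem_cache_node cache itself once slab_state has progressed, or via early_kmem_cache_node_alloc() (3518-3553) during bootstrap, when that cache does not exist yet. A sketch with error handling compressed:

static int init_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n;

		if (slab_state == DOWN) {
			/* Bootstrap: kmem_cache_node itself does not exist yet */
			early_kmem_cache_node_alloc(node);
			continue;
		}
		n = kmem_cache_alloc_node(kmem_cache_node, GFP_KERNEL, node);
		if (!n) {
			free_kmem_cache_nodes(s);
			return 0;
		}

		init_kmem_cache_node(n);
		s->node[node] = n;
	}
	return 1;
}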
3624 * per node partial lists and therefore no locking will be required.
3629 * per node list when we reach the limit.
3631 * per node list when we run out of per cpu objects. We only fetch
3870 * Attempt to free all partial slabs on a node.
3898 int node;
3901 for_each_kmem_cache_node(s, node, n)
3902 if (n->nr_partial || slabs_node(s, node))
3912 int node;
3917 for_each_kmem_cache_node(s, node, n) {
3919 if (n->nr_partial || slabs_node(s, node))
3981 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3988 page = alloc_pages_node(node, flags, order);
3998 void *__kmalloc_node(size_t size, gfp_t flags, int node)
4004 ret = kmalloc_large_node(size, flags, node);
4008 flags, node);
4018 ret = slab_alloc_node(s, flags, node, _RET_IP_);
4020 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
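__kmalloc_node() at 3998 dispatches on size: requests too large for any kmalloc cache bypass the slab layer via kmalloc_large_node() (which at 3981-3988 goes straight to alloc_pages_node()), everything else goes through kmalloc_slab() and slab_alloc_node(). The track_caller variant at 4479-4502 follows the same shape. A sketch omitting the kasan hooks and the tracing of the large-allocation path:

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		/* Too big for any kmalloc cache: use the page allocator */
		return kmalloc_large_node(size, flags, node);

	s = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = slab_alloc_node(s, flags, node, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);

	return ret;
}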
4145 int node;
4156 for_each_kmem_cache_node(s, node, n) {
4198 if (slabs_node(s, node))
4227 * If the node still has available memory, we need kmem_cache_node
4238 * if n->nr_slabs > 0, slabs still exist on the node
4245 s->node[offline_node] = NULL;
4261 * If the node's memory is already available, then kmem_cache_node is
4268 * We are bringing a node online. No memory is available yet. We must
4269 * allocate a kmem_cache_node structure in order to bring the node
4276 * since memory is not yet available from the node that
4285 s->node[nid] = n;
4336 int node;
4348 for_each_kmem_cache_node(s, node, n) {
4379 /* Able to allocate the per node structures */
4383 offsetof(struct kmem_cache, node) +
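Lines 4379-4383 are from bootstrap in kmem_cache_init(): the boot kmem_cache is sized as offsetof(struct kmem_cache, node) plus one kmem_cache_node pointer per possible node, which works only because node[] is the final member of struct kmem_cache. As a hedged illustration (kmem_cache_struct_size is a hypothetical name, not a slub.c function):

/*
 * Hypothetical helper name; the computation itself is the one quoted
 * above: struct kmem_cache ends in node[], so only nr_node_ids
 * pointers need to be allocated rather than MAX_NUMNODES.
 */
static unsigned int kmem_cache_struct_size(void)
{
	return offsetof(struct kmem_cache, node) +
	       nr_node_ids * sizeof(struct kmem_cache_node *);
}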
4479 int node, unsigned long caller)
4485 ret = kmalloc_large_node(size, gfpflags, node);
4489 gfpflags, node);
4499 ret = slab_alloc_node(s, gfpflags, node, caller);
4502 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4582 int node;
4587 for_each_kmem_cache_node(s, node, n)
4738 int node;
4748 for_each_kmem_cache_node(s, node, n) {
4909 int node;
4923 int node;
4930 node = page_to_nid(page);
4939 nodes[node] += x;
4943 node = page_to_nid(page);
4951 nodes[node] += x;
4964 * unplug code doesn't destroy the kmem_cache->node[] data.
4971 for_each_kmem_cache_node(s, node, n) {
4981 nodes[node] += x;
4989 for_each_kmem_cache_node(s, node, n) {
4997 nodes[node] += x;
5002 for (node = 0; node < nr_node_ids; node++)
5003 if (nodes[node])
5005 node, nodes[node]);
5748 int node;
5751 for_each_kmem_cache_node(s, node, n) {