Lines Matching defs:node

85  *	Modified the slab allocator to be node aware on NUMA systems.
86 * Each node has its own list of partial, free and full slabs.
87 * All object allocations for a node occur from node specific slab lists.
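
The header comment above (lines 85-87) states the core NUMA design: every node keeps its own partial, full and free slab lists, and allocations for a node are served only from that node's lists. A minimal userspace sketch of that per-node layout, with illustrative field and type names rather than the kernel's exact definitions:

struct list_head { struct list_head *next, *prev; };	/* stand-in for the kernel's list_head */

/*
 * Illustrative per-node bookkeeping, roughly mirroring the listing's
 * struct kmem_cache_node: each NUMA node tracks its own slab lists and
 * a cap on how many idle objects it may keep around.
 */
struct node_slab_lists {
	struct list_head slabs_partial;	/* slabs with some objects still free */
	struct list_head slabs_full;	/* slabs with no free objects */
	struct list_head slabs_free;	/* completely empty slabs */
	unsigned long free_objects;	/* objects currently idle on this node */
	unsigned long free_limit;	/* see the free_limit computation below */
};

/* One instance per online node; allocations for node N only ever touch lists[N]. */
struct node_slab_lists *node_lists[64];	/* 64 is an illustrative MAX_NUMNODES */
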
202 * Need this for bootstrapping a per node allocator.
212 int node, struct list_head *list);
492 int node = __this_cpu_read(slab_reap_node);
494 node = next_node_in(node, node_online_map);
495 __this_cpu_write(slab_reap_node, node);
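
Lines 492-495 advance a per-cpu slab_reap_node cursor to the next online node, so periodic reaping walks the nodes round-robin instead of always hammering the local one. A small runnable sketch of the same rotation, assuming a plain bitmask of online nodes in place of the kernel's nodemask helpers:

#include <stdio.h>

#define NR_NODES 8

static unsigned int node_online_mask = 0x0b;	/* nodes 0, 1 and 3 online (illustrative) */
static int reap_node;				/* per-cpu in the kernel; a global here */

/* Next online node after 'node', wrapping around (in the spirit of next_node_in()). */
static int next_online_node(int node)
{
	for (int i = 1; i <= NR_NODES; i++) {
		int candidate = (node + i) % NR_NODES;

		if (node_online_mask & (1u << candidate))
			return candidate;
	}
	return node;	/* only one node online */
}

int main(void)
{
	for (int pass = 0; pass < 6; pass++) {
		reap_node = next_online_node(reap_node);
		printf("reap pass %d visits node %d\n", pass, reap_node);
	}
	return 0;	/* visits 1, 3, 0, 1, 3, 0 */
}
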
532 static struct array_cache *alloc_arraycache(int node, int entries,
538 ac = kmalloc_node(memsize, gfp, node);
606 static inline struct alien_cache **alloc_alien_cache(int node,
643 static struct alien_cache *__alloc_alien_cache(int node, int entries,
649 alc = kmalloc_node(memsize, gfp, node);
658 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
665 alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
670 if (i == node || !node_online(i))
672 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
695 struct array_cache *ac, int node,
698 struct kmem_cache_node *n = get_node(cachep, node);
710 free_block(cachep, ac->entry, ac->avail, node, list);
721 int node = __this_cpu_read(slab_reap_node);
724 struct alien_cache *alc = n->alien[node];
732 __drain_alien_cache(cachep, ac, node, &list);
763 int node, int page_node)
770 n = get_node(cachep, node);
796 int node = numa_mem_id();
798 * Make sure we are not freeing an object from another node to the array
801 if (likely(node == page_node))
804 return __cache_free_alien(cachep, objp, node, page_node);
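
Lines 796-804 are the free-side node check: an object freed on the node that owns its backing page stays on the fast per-cpu path, while a cross-node ("alien") free is diverted to the owning node's alien cache. A runnable sketch of that decision; queue_local_free() and queue_alien_free() are made-up stand-ins for the per-cpu array cache and the alien cache:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's per-cpu array cache and per-node alien cache. */
static void queue_local_free(void *obj)
{
	printf("local free  %p\n", obj);
}

static void queue_alien_free(void *obj, int owner_node)
{
	printf("alien free  %p -> node %d\n", obj, owner_node);
}

/*
 * Shaped after the listing's cache_free_alien() lines: if the object
 * belongs to the freeing CPU's node, keep it local; otherwise park it
 * for the node that owns the page.  Returns true for an alien free.
 */
static bool free_object(void *obj, int cpu_node, int page_node)
{
	if (cpu_node == page_node) {
		queue_local_free(obj);
		return false;
	}
	queue_alien_free(obj, page_node);
	return true;
}

int main(void)
{
	int dummy;

	free_object(&dummy, 1, 1);	/* same node: stays on the fast path */
	free_object(&dummy, 1, 0);	/* cross-node: routed to node 0's alien cache */
	return 0;
}
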
808 * Construct gfp mask to allocate from a specific node but do not reclaim or
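
The comment at line 808 is cut off here, but its intent is a gfp mask that pins an allocation to one node without triggering reclaim. One plausible shape for such a helper, using standard gfp flag names; the exact flag set masked by the listed code is not shown in this excerpt, so treat this as an assumption:

#include <linux/gfp.h>

/* Illustrative only: pin to one node, forbid blocking reclaim, stay quiet on failure. */
static inline gfp_t pinned_node_gfp(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) &
	       ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
}
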
817 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
824 * node has not already allocated this
826 n = get_node(cachep, node);
829 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
836 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
845 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
852 cachep->node[node] = n;
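
Lines 829 and 845 size the per-node free_limit as (1 + nr_cpus_node(node)) * batchcount + num, which roughly caps how many idle objects a node keeps cached before empty slabs are handed back to the page allocator. With purely illustrative numbers, say 4 CPUs on the node, a batchcount of 16 and 32 objects per slab, the cap works out to (1 + 4) * 16 + 32 = 112 objects.
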
859 * Allocates and initializes a kmem_cache_node for a node on each slab cache, used for
861 * will be allocated off-node since memory is not yet online for the new node.
862 * When hotplugging memory or a cpu, existing nodes are not replaced if
867 static int init_cache_node_node(int node)
873 ret = init_cache_node(cachep, node, GFP_KERNEL);
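
Lines 859-873 cover the hotplug path: when a CPU or memory comes online, init_cache_node_node() walks every cache and makes sure a per-node structure exists for the new node, while a structure that already exists is kept and only its free limit is refreshed. A small sketch of that create-if-missing pattern, with illustrative types and names:

#include <stdlib.h>

struct per_node_state {
	unsigned long free_limit;
	/* ...slab lists, lock, etc. elided... */
};

struct cache {
	struct per_node_state *node[64];	/* indexed by node id; 64 is illustrative */
	unsigned int batchcount;
	unsigned int objs_per_slab;
};

/*
 * Create-if-missing, in the spirit of the listing's init_cache_node():
 * an existing per-node structure is never replaced on hotplug, its
 * free_limit is simply recomputed for the current CPU count.
 */
static int init_node_state(struct cache *c, int nid, unsigned int cpus_on_node)
{
	struct per_node_state *n = c->node[nid];

	if (!n) {
		n = calloc(1, sizeof(*n));
		if (!n)
			return -1;	/* -ENOMEM in the kernel */
		c->node[nid] = n;
	}

	n->free_limit = (1 + cpus_on_node) * c->batchcount + c->objs_per_slab;
	return 0;
}
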
883 int node, gfp_t gfp, bool force_change)
893 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
899 new_shared = alloc_arraycache(node,
905 ret = init_cache_node(cachep, node, gfp);
909 n = get_node(cachep, node);
913 n->shared->avail, node, &list);
954 int node = cpu_to_mem(cpu);
955 const struct cpumask *mask = cpumask_of_node(node);
963 n = get_node(cachep, node);
974 free_block(cachep, nc->entry, nc->avail, node, &list);
985 shared->avail, node, &list);
1009 n = get_node(cachep, node);
1019 int node = cpu_to_mem(cpu);
1028 err = init_cache_node_node(node);
1037 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1062 * Even if all the cpus of a node are down, we don't free the
1064 * a kmalloc allocation from another cpu for memory from the node of
1099 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1100 * Returns -EBUSY if not all objects can be drained, so that the node is not
1105 static int __meminit drain_cache_node_node(int node)
1113 n = get_node(cachep, node);
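
Lines 1099-1113 describe the memory hot-remove contract: each cache drains its freelist for the departing node, and -EBUSY is returned if live objects remain so the node cannot be offlined. A sketch of that drain-then-refuse shape; the struct and helpers are illustrative, not the kernel's:

#include <errno.h>

struct node_state {
	unsigned long active_objects;	/* objects still handed out to users */
};

/* Illustrative: return all completely free slabs on this node to the page allocator. */
static void drain_free_slabs(struct node_state *n)
{
	(void)n;	/* walking slabs_free is elided in this sketch */
}

/*
 * Mirrors the contract at lines 1099-1100: try to empty the node, but
 * report -EBUSY if anything is still allocated so hot-remove is refused.
 */
static int drain_node(struct node_state *n)
{
	drain_free_slabs(n);

	if (n->active_objects)
		return -EBUSY;

	return 0;
}
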
1179 cachep->node[nodeid] = ptr;
1188 int node;
1190 for_each_online_node(node) {
1191 cachep->node[node] = &init_kmem_cache_node[index + node];
1192 cachep->node[node]->next_reap = jiffies +
1248 offsetof(struct kmem_cache, node) +
1300 * node.
1332 int node;
1339 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1344 for_each_kmem_cache_node(cachep, node, n) {
1353 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1354 node, total_slabs - free_slabs, total_slabs,
1768 int node;
1770 for_each_online_node(node) {
1771 cachep->node[node] = kmalloc_node(
1772 sizeof(struct kmem_cache_node), gfp, node);
1773 BUG_ON(!cachep->node[node]);
1774 kmem_cache_node_init(cachep->node[node]);
1778 cachep->node[numa_mem_id()]->next_reap =
2112 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2116 assert_spin_locked(&get_node(cachep, node)->list_lock);
2129 int node, bool free_all, struct list_head *list)
2140 free_block(cachep, ac->entry, tofree, node, list);
2149 int node = numa_mem_id();
2155 n = get_node(cachep, node);
2157 free_block(cachep, ac->entry, ac->avail, node, &list);
2166 int node;
2171 for_each_kmem_cache_node(cachep, node, n)
2175 for_each_kmem_cache_node(cachep, node, n) {
2177 drain_array_locked(cachep, n->shared, node, true, &list);
2226 int node;
2229 for_each_kmem_cache_node(s, node, n)
2239 int node;
2245 for_each_kmem_cache_node(cachep, node, n) {
2268 /* NUMA: free the node structures */
2273 cachep->node[i] = NULL;
2902 int node;
2907 node = numa_mem_id();
2919 n = get_node(cachep, node);
2963 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3066 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
3089 * certain node and fallback is permitted. First we scan all the
3090 * available nodes for available objects. If that fails then we
3091 * perform an allocation without specifying a node. This allows the page
3116 * from existing per node queues.
3272 * We may just have run out of memory on the local node.
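
Lines 3089-3091 (together with the note at 3272) describe the fallback allocator: first scan the allowed nodes for objects already sitting in their per-node queues, and only if that fails allocate without naming a node at all so the page allocator's reclaim and fallback logic can run. A runnable sketch of that two-phase search; the node set and helpers are illustrative:

#include <stddef.h>
#include <stdio.h>

#define NR_NODES 4

static int cached_objects[NR_NODES] = { 0, 0, 2, 0 };	/* only node 2 has spares */

/* Phase-1 helper: hand out an object from this node's queues, or NULL. */
static void *try_alloc_from_node(int nid)
{
	if (!cached_objects[nid])
		return NULL;
	cached_objects[nid]--;
	return &cached_objects[nid];	/* any non-NULL token will do for the sketch */
}

/* Phase-2 helper: pretend the page allocator found memory somewhere. */
static void *alloc_any_node(void)
{
	static int fresh_memory;
	return &fresh_memory;
}

/*
 * Two-phase fallback in the spirit of the comment at lines 3089-3091:
 * scan every allowed node for cached objects, then give up on node
 * placement and let the page allocator pick.
 */
static void *fallback_alloc_sketch(void)
{
	for (int nid = 0; nid < NR_NODES; nid++) {
		void *obj = try_alloc_from_node(nid);

		if (obj) {
			printf("served from node %d's lists\n", nid);
			return obj;
		}
	}
	printf("nothing cached anywhere, asking the page allocator\n");
	return alloc_any_node();
}

int main(void)
{
	fallback_alloc_sketch();	/* finds node 2's first spare object */
	fallback_alloc_sketch();	/* finds node 2's second spare object */
	fallback_alloc_sketch();	/* nothing left: falls back to phase 2 */
	return 0;
}
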
3322 int nr_objects, int node, struct list_head *list)
3325 struct kmem_cache_node *n = get_node(cachep, node);
3338 check_spinlock_acquired_node(cachep, node);
3369 int node = numa_mem_id();
3375 n = get_node(cachep, node);
3390 free_block(cachep, ac->entry, batchcount, node, &list);
3561 * kmem_cache_alloc_node - Allocate an object on the specified node
3564 * @nodeid: node number of the target node.
3567 * node, which can improve the performance for cpu bound structures.
3569 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
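
The kernel-doc at lines 3561-3569 spells out the contract of kmem_cache_alloc_node(): same as kmem_cache_alloc(), but the object is placed on the requested node, and fallback to other nodes is allowed unless __GFP_THISNODE is passed. A minimal, illustrative kernel-style caller; the cache, struct and function names here are hypothetical:

#include <linux/slab.h>

/* Hypothetical per-device state we want sitting close to the device's node. */
struct my_dev_state {
	int foo;
};

static struct kmem_cache *my_dev_cache;	/* assumed created elsewhere via kmem_cache_create() */

static struct my_dev_state *my_dev_state_alloc(int nid)
{
	/*
	 * Ask for memory on 'nid'.  Without __GFP_THISNODE the allocator may
	 * fall back to another node rather than fail (cf. line 3569).
	 */
	return kmem_cache_alloc_node(my_dev_cache, GFP_KERNEL, nid);
}
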
3605 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3615 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3621 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3623 return __do_kmalloc_node(size, flags, node, _RET_IP_);
3628 int node, unsigned long caller)
3630 return __do_kmalloc_node(size, flags, node, caller);
3768 int node;
3771 for_each_online_node(node) {
3772 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3783 node--;
3784 while (node >= 0) {
3785 n = get_node(cachep, node);
3790 cachep->node[node] = NULL;
3792 node--;
3828 int node;
3832 node = cpu_to_mem(cpu);
3833 n = get_node(cachep, node);
3835 free_block(cachep, ac->entry, ac->avail, node, &list);
3911 * Drain an array if it contains any elements, taking the node lock only if
3912 * necessary. Note that the node listlock also protects the array_cache
3916 struct array_cache *ac, int node)
3932 drain_array_locked(cachep, ac, node, false, &list);
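
Lines 3911-3916 and 3932 explain that the periodic drain only takes the node's list lock when the array cache actually holds something, so reaping an idle cache stays lock-free. A runnable sketch of that check-before-lock pattern using a pthread mutex; the structure names and the "drain half" policy are illustrative:

#include <pthread.h>

struct obj_array {
	int avail;		/* number of cached object pointers */
	void *entry[32];
};

struct node_lists {
	pthread_mutex_t list_lock;
	/* ...slab lists elided... */
};

/* Illustrative: return 'count' cached entries to the node's slab lists. */
static void flush_locked(struct node_lists *n, struct obj_array *ac, int count)
{
	(void)n;		/* list surgery elided in this sketch */
	ac->avail -= count;
}

/*
 * Check-before-lock, as described at lines 3911-3912: an empty array is
 * skipped without touching the node lock; the unlocked read of 'avail'
 * is tolerable because the worst case is draining a little later.
 */
static void drain_array_sketch(struct node_lists *n, struct obj_array *ac)
{
	if (!ac || !ac->avail)			/* nothing cached: stay lock-free */
		return;

	pthread_mutex_lock(&n->list_lock);
	if (ac->avail)				/* re-check under the lock */
		flush_locked(n, ac, ac->avail / 2);	/* keep some objects warm */
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct node_lists n = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
	struct obj_array ac = { .avail = 8 };

	drain_array_sketch(&n, &ac);	/* takes the lock, drains 4 entries */
	drain_array_sketch(&n, NULL);	/* no array cache at all: returns immediately */
	return 0;
}
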
3954 int node = numa_mem_id();
3965 * We only take the node lock if absolutely necessary and we
3969 n = get_node(searchp, node);
3973 drain_array(searchp, n, cpu_cache_get(searchp), node);
3984 drain_array(searchp, n, n->shared, node);
4012 int node;
4015 for_each_kmem_cache_node(cachep, node, n) {
4047 { /* node stats */