Lines Matching defs:node

151 	int			node;		/* I: the associated node ID */
177 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
211 struct list_head pwqs_node; /* WR: node on wq->pwqs */
212 struct list_head mayday_node; /* MD: node on wq->maydays */
280 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
286 /* possible CPUs of each node */
409 list_for_each_entry((worker), &(pool)->workers, node) \
549 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
551 * @node: the node ID
558 * Return: The unbound pool_workqueue for @node.
561 int node)
566 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
571 if (unlikely(node == NUMA_NO_NODE))
574 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
1544 * workqueue_select_cpu_near - Select a CPU based on NUMA node
1545 * @node: NUMA node ID that we want to select a CPU from
1548 * node. If there are no CPUs available on the given node it will return
1552 static int workqueue_select_cpu_near(int node)
1560 /* Delay binding to CPU if node is not valid or online */
1561 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1564 /* Use local node/cpu if we are already there */
1566 if (node == cpu_to_node(cpu))
1569 /* Use "random" otherwise known as "first" online CPU of node */
1570 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1577 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1578 * @node: NUMA node that we are targeting the work for
1582 * We queue the work to a "random" CPU within a given NUMA node. The basic
1584 * NUMA node.
1587 * the right NUMA node. If no node is requested or the requested node is
1591 * intersection of cpu_online_mask and the cpumask of the node, unless we
1592 * are running on the node. In that case we just use the current CPU.
1596 bool queue_work_node(int node, struct workqueue_struct *wq,
1605 * node instead of cycling through individual CPUs within the node.
1616 int cpu = workqueue_select_cpu_near(node);
1832 static struct worker *alloc_worker(int node)
1836 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1840 INIT_LIST_HEAD(&worker->node);
1872 list_add_tail(&worker->node, &pool->workers);
1893 list_del(&worker->node);
1930 worker = alloc_worker(pool->node);
1942 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
3441 pool->node = NUMA_NO_NODE;
3620 int node;
3633 /* if cpumask is contained inside a NUMA node, we belong to that node */
3635 for_each_node(node) {
3637 wq_numa_possible_cpumask[node])) {
3638 target_node = node;
3651 pool->node = target_node;
3831 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3842 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3844 * @node: the target NUMA node
3848 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3853 * enabled and @node has online CPUs requested by @attrs, the returned
3854 * cpumask is the intersection of the possible CPUs of @node and
3857 * The caller is responsible for ensuring that the cpumask of @node stays
3863 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3869 /* does @node have any online CPUs @attrs wants? */
3870 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3877 /* yes, return possible CPUs in @node that @attrs wants */
3878 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3893 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3895 int node,
3906 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3907 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3924 int node;
3926 for_each_node(node)
3927 put_pwq_unlocked(ctx->pwq_tbl[node]);
3943 int node;
3980 for_each_node(node) {
3981 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3982 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3983 if (!ctx->pwq_tbl[node])
3987 ctx->pwq_tbl[node] = ctx->dfl_pwq;
4010 int node;
4018 for_each_node(node)
4019 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
4020 ctx->pwq_tbl[node]);
4076 * machines, this function maps a separate pwq to each NUMA node with
4078 * NUMA node it was issued on. Older pwqs are released as in-flight work
4116 * Note that when the last allowed CPU of a NUMA node goes offline for a
4127 int node = cpu_to_node(cpu);
4148 pwq = unbound_pwq_by_node(wq, node);
4156 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4173 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4181 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4282 * longer the case on NUMA machines due to per-node pools. While
4393 int node;
4460 for_each_node(node) {
4461 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4462 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4691 if (pool->node != NUMA_NO_NODE)
4692 pr_cont(" node=%d", pool->node);
5402 * pool_ids RO int : the associated pool IDs for each node
5463 int node, written = 0;
5467 for_each_node(node) {
5469 "%s%d:%d", delim, node,
5470 unbound_pwq_by_node(wq, node)->pool->id);
5919 int node, cpu;
5931 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5940 * We want masks of possible CPUs of each node which isn't readily
5947 for_each_node(node)
5948 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5949 node_online(node) ? node : NUMA_NO_NODE));
5952 node = cpu_to_node(cpu);
5953 cpumask_set_cpu(cpu, tbl[node]);
5993 pool->node = cpu_to_node(cpu);
6056 * CPU to node mapping may not be available that early on some
6058 * previously could be missing node hint and unbound pools NUMA
6069 pool->node = cpu_to_node(cpu);