Lines Matching defs:nid

1296 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1298 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1301 static inline unsigned long task_faults(struct task_struct *p, int nid)
1307 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1310 static inline unsigned long group_faults(struct task_struct *p, int nid)
1318 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1321 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1323 return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] + group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
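
Taken together, lines 1296-1323 show the layout behind all the fault counters: one flat array indexed by (stat class, node id, private/shared), with the accessors summing the private and shared slots for a single node. Below is a minimal userspace model of that arithmetic; the enum order, the value of NR_NUMA_HINT_FAULT_TYPES and the 4-node setup are assumptions for illustration, not kernel API.

/* Userspace sketch of the flat faults[] layout used above; constants and
 * enum order are assumptions modelled on the listing. */
#include <stdio.h>

#define NR_NUMA_HINT_FAULT_TYPES 2   /* private, shared */

enum numa_faults_stats { NUMA_MEM, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF };

static int nr_node_ids = 4;          /* pretend 4-node machine */

static int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

int main(void)
{
	/* 4 stat classes * nr_node_ids nodes * 2 (private/shared) slots */
	unsigned long faults[4 * 4 * NR_NUMA_HINT_FAULT_TYPES] = { 0 };

	faults[task_faults_idx(NUMA_MEM, 2, 0)] = 10;   /* private faults on node 2 */
	faults[task_faults_idx(NUMA_MEM, 2, 1)] = 5;    /* shared faults on node 2 */

	/* task_faults()/group_faults() sum private + shared for one node */
	unsigned long total = faults[task_faults_idx(NUMA_MEM, 2, 0)] +
			      faults[task_faults_idx(NUMA_MEM, 2, 1)];
	printf("node 2 memory faults: %lu\n", total);   /* prints 15 */
	return 0;
}
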
1359 static bool numa_is_active_node(int nid, struct numa_group *ng)
1361 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
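
Line 1361 is the whole active-node test: a node counts as active when its CPU-side faults, scaled by ACTIVE_NODE_FRACTION, exceed the busiest node's count. A standalone check of that predicate follows; ACTIVE_NODE_FRACTION = 3 is an assumed representative value, since the constant's definition is not part of this listing.

/* Standalone check of the active-node predicate from line 1361;
 * max_faults_cpu stands in for ng->max_faults_cpu. */
#include <stdbool.h>
#include <stdio.h>

#define ACTIVE_NODE_FRACTION 3       /* assumption for this sketch */

static bool numa_is_active_node(unsigned long node_faults_cpu,
				unsigned long max_faults_cpu)
{
	return node_faults_cpu * ACTIVE_NODE_FRACTION > max_faults_cpu;
}

int main(void)
{
	/* Busiest node saw 900 CPU faults; nodes above 900/3 = 300 are active. */
	printf("%d\n", numa_is_active_node(400, 900));  /* 1: active */
	printf("%d\n", numa_is_active_node(250, 900));  /* 0: not active */
	return 0;
}
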
1365 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, int maxdist, bool task)
1385 int dist = node_distance(nid, node);
1388 * for placement; nid was already counted.
1390 if (dist == sched_max_numa_distance || node == nid) {
1437 static inline unsigned long task_weight(struct task_struct *p, int nid, int dist)
1451 faults = task_faults(p, nid);
1452 faults += score_nearby_nodes(p, nid, dist, true);
1457 static inline unsigned long group_weight(struct task_struct *p, int nid, int dist)
1472 faults = group_faults(p, nid);
1473 faults += score_nearby_nodes(p, nid, dist, false);
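
Lines 1437-1473 suggest task_weight() and group_weight() share one shape: faults on the candidate node, plus a distance-scaled bonus from nearby nodes via score_nearby_nodes(), normalized against the total so different tasks and groups are comparable. A hedged sketch of that normalization follows; the per-mille scale factor and the precomputed nearby_bonus argument are assumptions about details not visible in this listing.

/* Sketch of the weight calculation around lines 1437-1473: faults on the
 * candidate node plus a nearby-node bonus, expressed as a share of the
 * total. The 1000 scale and the nearby_bonus parameter are assumptions. */
#include <stdio.h>

static unsigned long node_weight(unsigned long node_faults,
				 unsigned long nearby_bonus,
				 unsigned long total_faults)
{
	if (!total_faults)
		return 0;
	return 1000 * (node_faults + nearby_bonus) / total_faults;
}

int main(void)
{
	/* 60 of 200 faults on the node itself, 20 more credited from neighbours */
	printf("weight = %lu\n", node_weight(60, 20, 200));  /* 400 (per mille) */
	return 0;
}
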
1668 static void update_numa_stats(struct task_numa_env *env, struct numa_stats *ns, int nid, bool find_idle)
1676 for_each_cpu(cpu, cpumask_of_node(nid))
1700 ns->weight = cpumask_weight(cpumask_of_node(nid));
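
update_numa_stats() at line 1668 walks every CPU of the node (line 1676) and records the node's CPU count as its weight (line 1700). A toy aggregation over a fake CPU-to-node mapping follows; the single load metric and the 2-CPUs-per-node layout are invented, and the real code tracks several metrics and can also look for an idle CPU when find_idle is set.

/* Toy per-node aggregation mirroring the loop at lines 1676-1700;
 * cpu_load[] and the CPU-to-node mapping are made-up inputs. */
#include <stdio.h>

#define CPUS_PER_NODE 2

struct numa_stats { unsigned long load; int weight; };

static void update_numa_stats(struct numa_stats *ns, int nid,
			      const unsigned long *cpu_load, int nr_cpus)
{
	ns->load = 0;
	ns->weight = 0;
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		if (cpu / CPUS_PER_NODE != nid)   /* stand-in for cpumask_of_node() */
			continue;
		ns->load += cpu_load[cpu];
		ns->weight++;                     /* cpumask_weight() equivalent */
	}
}

int main(void)
{
	unsigned long cpu_load[] = { 10, 30, 5, 0 };  /* CPUs 0-1: node 0, 2-3: node 1 */
	struct numa_stats ns;

	update_numa_stats(&ns, 1, cpu_load, 4);
	printf("node 1: load=%lu weight=%d\n", ns.load, ns.weight);  /* load=5 weight=2 */
	return 0;
}
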
2060 int nid, ret, dist;
2097 /* Try to find a spot on the preferred nid. */
2109 for_each_online_node(nid)
2111 if (nid == env.src_nid || nid == p->numa_preferred_nid) {
2122 taskimp = task_weight(p, nid, dist) - taskweight;
2123 groupimp = group_weight(p, nid, dist) - groupweight;
2129 env.dst_nid = nid;
2145 nid = env.src_nid;
2147 nid = cpu_to_node(env.best_cpu);
2150 if (nid != p->numa_preferred_nid) {
2151 sched_setnuma(p, nid);
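
Lines 2097-2151 outline the migration decision in task_numa_migrate(): take the weights on the source node as a baseline, score every other online node by how much the task and its group would gain (lines 2122-2123), keep the best candidate, and call sched_setnuma() when the winner differs from the current preferred nid. Below is a condensed model of that scan; the node count and weights are invented, and the real code also folds in load, distance and per-CPU placement before accepting a move.

/* Condensed model of the node scan at lines 2109-2151: score each node
 * against the source node and pick the largest combined improvement. */
#include <stdio.h>

#define NR_NODES 4

int main(void)
{
	long task_w[NR_NODES]  = { 300, 450, 200, 500 };  /* task_weight(p, nid, dist) */
	long group_w[NR_NODES] = { 350, 400, 250, 480 };  /* group_weight(p, nid, dist) */
	int src_nid = 0, best_nid = src_nid;
	long best_imp = 0;

	for (int nid = 0; nid < NR_NODES; nid++) {
		if (nid == src_nid)
			continue;
		long taskimp = task_w[nid] - task_w[src_nid];
		long groupimp = group_w[nid] - group_w[src_nid];
		if (taskimp + groupimp > best_imp) {
			best_imp = taskimp + groupimp;
			best_nid = nid;   /* env.dst_nid in the listing */
		}
	}
	printf("preferred nid moves from %d to %d\n", src_nid, best_nid);  /* 0 -> 3 */
	return 0;
}
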
2213 int nid, active_nodes = 0;
2215 for_each_online_node(nid)
2217 faults = group_faults_cpu(numa_group, nid);
2223 for_each_online_node(nid)
2225 faults = group_faults_cpu(numa_group, nid);
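
The two loops at lines 2215-2225 read as a two-pass reduction: the first pass finds the busiest node's CPU-fault count, the second counts how many nodes clear the active threshold relative to that maximum. A standalone version over an invented faults table follows; ACTIVE_NODE_FRACTION = 3 is again an assumed value.

/* Two-pass active-node count mirroring lines 2213-2225; the faults table
 * and ACTIVE_NODE_FRACTION are assumptions for illustration. */
#include <stdio.h>

#define NR_NODES 4
#define ACTIVE_NODE_FRACTION 3

int main(void)
{
	unsigned long faults[NR_NODES] = { 900, 400, 250, 10 };  /* group_faults_cpu() per node */
	unsigned long max_faults = 0;
	int active_nodes = 0;

	for (int nid = 0; nid < NR_NODES; nid++)        /* pass 1: find the maximum */
		if (faults[nid] > max_faults)
			max_faults = faults[nid];

	for (int nid = 0; nid < NR_NODES; nid++)        /* pass 2: count active nodes */
		if (faults[nid] * ACTIVE_NODE_FRACTION > max_faults)
			active_nodes++;

	printf("active nodes: %d\n", active_nodes);     /* 2 (nodes with 900 and 400) */
	return 0;
}
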
2354 * Determine the preferred nid for a task in a numa_group. This needs to
2358 static int preferred_group_nid(struct task_struct *p, int nid)
2365 return nid;
2375 int node, max_node = nid;
2391 * Finding the preferred nid in a system with NUMA backplane
2433 * winner is the preferred nid.
2435 nid = a;
2444 return nid;
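
Lines 2354-2444 belong to preferred_group_nid(), which the comments at lines 2391 and 2433 tie to NUMA backplane and mesh topologies: a node is judged together with the nodes close to it, and the candidate set is narrowed until a single winner remains as the preferred nid. Below is a heavily simplified single-round sketch of that neighbourhood scoring; the distance matrix, fault counts and one-round structure are all assumptions, while the real function repeats the scoring at shrinking distances.

/* One round of the neighbourhood scoring hinted at in lines 2375-2444:
 * each candidate node is scored by the group faults of all nodes within
 * `dist` of it, and the best-scoring node wins. */
#include <stdio.h>

#define NR_NODES 4

int main(void)
{
	int distance[NR_NODES][NR_NODES] = {      /* node_distance(a, b) stand-in */
		{ 10, 20, 40, 40 },
		{ 20, 10, 40, 40 },
		{ 40, 40, 10, 20 },
		{ 40, 40, 20, 10 },
	};
	unsigned long faults[NR_NODES] = { 50, 40, 80, 30 };  /* group_faults() per node */
	int dist = 40, best_nid = 0;
	unsigned long best_score = 0;

	for (int a = 0; a < NR_NODES; a++) {
		unsigned long score = 0;
		for (int b = 0; b < NR_NODES; b++)
			if (distance[a][b] < dist)     /* a's neighbourhood, a included */
				score += faults[b];
		if (score > best_score) {
			best_score = score;
			best_nid = a;
		}
	}
	printf("preferred nid: %d\n", best_nid);  /* 2: its pair (2,3) totals 110, beating (0,1)'s 90 */
	return 0;
}
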
2449 int seq, nid, max_nid = NUMA_NO_NODE;
2480 for_each_online_node(nid)
2490 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2491 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2492 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2493 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2521 * nid and priv in a specific region because it
2534 max_nid = nid;
2538 max_nid = nid;
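
Lines 2480-2538 are the per-node update loop of task_numa_placement(): for every online node it resolves the four counter slots (settled and buffered, memory-side and CPU-side), folds the buffered faults into the settled window, and remembers the node with the highest total as max_nid (lines 2534 and 2538). A compact model of that fold-and-pick loop follows; the halve-then-add decay rule and the numbers are assumptions, since the listing only shows the index setup and the max_nid updates.

/* Model of the fold-and-pick loop at lines 2480-2538. faults[] holds the
 * settled per-node counts, faults_buf[] what accumulated since the last
 * scan; the decay rule (halve old, add new) is an assumption. */
#include <stdio.h>

#define NR_NODES 4

int main(void)
{
	unsigned long faults[NR_NODES]     = { 100, 40, 300, 20 };
	unsigned long faults_buf[NR_NODES] = {  10, 90,   5,  0 };
	unsigned long max_faults = 0;
	int max_nid = -1;                       /* NUMA_NO_NODE stand-in */

	for (int nid = 0; nid < NR_NODES; nid++) {
		/* Decay the old window and fold in the buffered faults. */
		faults[nid] = faults[nid] / 2 + faults_buf[nid];
		faults_buf[nid] = 0;

		if (faults[nid] > max_faults) {
			max_faults = faults[nid];
			max_nid = nid;          /* lines 2534 and 2538 */
		}
	}
	printf("max_nid = %d (faults %lu)\n", max_nid, max_faults);  /* 2, 155 */
	return 0;
}
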
2996 /* New address space, reset the preferred nid */