Lines Matching defs:ng (struct numa_group *ng in kernel/sched/fair.c)

1165 static inline unsigned long group_faults_priv(struct numa_group *ng);
1166 static inline unsigned long group_faults_shared(struct numa_group *ng);
1210 struct numa_group *ng;
1214 ng = rcu_dereference(p->numa_group);
1215 if (ng) {
1216 unsigned long shared = group_faults_shared(ng);
1217 unsigned long private = group_faults_priv(ng);
1219 period *= refcount_read(&ng->refcount);
1232 struct numa_group *ng;
1238 ng = deref_curr_numa_group(p);
1239 if (ng) {
1240 unsigned long shared = group_faults_shared(ng);
1241 unsigned long private = group_faults_priv(ng);
1244 period *= refcount_read(&ng->refcount);
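The matches at 1210-1219 (task_scan_start) and 1232-1244 (task_scan_max) apply the same scaling: the scan period is stretched by the number of tasks in the group and by the group's shared/private fault mix. A minimal userspace sketch of that arithmetic, assuming a simplified stand-in struct rather than the kernel's struct numa_group:

#include <stdio.h>

/* Simplified stand-in for the numa_group fields used here. */
struct numa_group_model {
	unsigned int refcount;        /* tasks in the group            */
	unsigned long shared_faults;  /* group_faults_shared() result  */
	unsigned long private_faults; /* group_faults_priv() result    */
};

/* Same shape as the scaling in task_scan_start()/task_scan_max():
 * bigger groups, and groups with mostly shared faults, end up with a
 * longer per-task scan period than small or mostly-private ones. */
static unsigned long scaled_scan_period(unsigned long period,
					const struct numa_group_model *ng)
{
	period *= ng->refcount;
	period *= ng->shared_faults + 1;
	period /= ng->private_faults + ng->shared_faults + 1;
	return period;
}

int main(void)
{
	struct numa_group_model ng = {
		.refcount = 4, .shared_faults = 300, .private_faults = 100,
	};
	/* 1000 * 4 * 301 / 401 -> 3002 with integer division */
	printf("%lu\n", scaled_scan_period(1000, &ng));
	return 0;
}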
1277 struct numa_group *ng;
1281 ng = rcu_dereference(p->numa_group);
1282 if (ng) {
1283 gid = ng->gid;
1312 struct numa_group *ng = deref_task_numa_group(p);
1314 if (!ng) {
1318 return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] + ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1326 static inline unsigned long group_faults_priv(struct numa_group *ng)
1333 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
1339 static inline unsigned long group_faults_shared(struct numa_group *ng)
1346 faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
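group_faults() at 1312-1318, group_faults_priv() at 1326-1333 and group_faults_shared() at 1339-1346 all read the same per-node array, with task_faults_idx(NUMA_MEM, node, priv) selecting the shared (0) or private (1) slot. A toy model of that layout and the three accessors, with the index function collapsed to just the node and private dimensions (a simplification, not the kernel's indexing):

#include <stdio.h>

#define NR_NODES 4

/* Toy layout: two slots per node, slot 0 = shared, slot 1 = private,
 * standing in for task_faults_idx(NUMA_MEM, node, priv). */
static unsigned long faults[NR_NODES * 2];

static int faults_idx(int node, int priv)
{
	return node * 2 + priv;
}

/* group_faults(): shared + private faults of one node. */
static unsigned long node_faults(int node)
{
	return faults[faults_idx(node, 0)] + faults[faults_idx(node, 1)];
}

/* group_faults_shared()/group_faults_priv(): one slot, summed over nodes. */
static unsigned long slot_total(int priv)
{
	unsigned long sum = 0;

	for (int node = 0; node < NR_NODES; node++)
		sum += faults[faults_idx(node, priv)];
	return sum;
}

int main(void)
{
	faults[faults_idx(1, 0)] = 10;	/* shared faults on node 1  */
	faults[faults_idx(1, 1)] = 30;	/* private faults on node 1 */
	printf("node 1: %lu, shared: %lu, private: %lu\n",
	       node_faults(1), slot_total(0), slot_total(1));
	return 0;
}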
1359 static bool numa_is_active_node(int nid, struct numa_group *ng)
1361 return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
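numa_is_active_node() at 1359-1361 treats a node as active for the group when its CPU faults clear a fixed fraction of the busiest node's count. A sketch of that predicate; ACTIVE_NODE_FRACTION is taken to be 3, which matches mainline kernels but should be read as an assumption here:

#include <stdio.h>

#define ACTIVE_NODE_FRACTION 3	/* assumed value */

/* A node counts as active when it sees more than 1/ACTIVE_NODE_FRACTION
 * of the CPU faults recorded on the group's busiest node. */
static int is_active_node(unsigned long node_cpu_faults,
			  unsigned long max_faults_cpu)
{
	return node_cpu_faults * ACTIVE_NODE_FRACTION > max_faults_cpu;
}

int main(void)
{
	/* 40 * 3 > 100: a node with 40 CPU faults vs. a 100-fault peak is active. */
	printf("%d\n", is_active_node(40, 100));
	return 0;
}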
1459 struct numa_group *ng = deref_task_numa_group(p);
1462 if (!ng) {
1466 total_faults = ng->total_faults;
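The matches at 1459-1466 are from group_weight(), which expresses the group's faults on a candidate node as a share of ng->total_faults in fixed point. A rough model; the 1000x scaling mirrors how the surrounding kernel code normalizes these weights, but treat it as an assumption:

#include <stdio.h>

/* Rough model of group_weight(): the node's share of the group's
 * faults, scaled by 1000 so it can be compared without floats. */
static unsigned long group_weight_model(unsigned long node_faults,
					unsigned long total_faults)
{
	if (!total_faults)
		return 0;
	return 1000UL * node_faults / total_faults;
}

int main(void)
{
	/* A node holding 250 of the group's 1000 faults gets weight 250. */
	printf("%lu\n", group_weight_model(250, 1000));
	return 0;
}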
1480 struct numa_group *ng = deref_curr_numa_group(p);
1524 if (!ng) {
1532 if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) * ACTIVE_NODE_FRACTION) {
1544 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1545 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
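should_numa_migrate_memory() (matches at 1480-1545) first allows the migration outright when the destination node is much busier than the source (the ACTIVE_NODE_FRACTION test at 1532). Otherwise the comparison at 1544-1545 is a cross-multiplied ratio test: the destination's CPU-to-memory fault ratio must beat the source's by more than 4/3, which gives 3/4 hysteresis against pages ping-ponging between nodes. A standalone sketch of that final test; the struct and names are illustrative, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

struct node_faults {
	unsigned long cpu;	/* CPU faults attributed to the node    */
	unsigned long mem;	/* memory faults attributed to the node */
};

/* Cross-multiplied form of
 *   faults_cpu(dst)   3   faults_cpu(src)
 *   --------------- * - > ---------------
 *   faults_mem(dst)   4   faults_mem(src)
 * which avoids division and keeps 3/4 hysteresis. */
static bool prefer_dst_node(struct node_faults src, struct node_faults dst)
{
	return dst.cpu * src.mem * 3 > src.cpu * dst.mem * 4;
}

int main(void)
{
	struct node_faults src = { .cpu = 10, .mem = 100 };
	struct node_faults dst = { .cpu = 20, .mem = 100 };
	/* 20*100*3 = 6000 > 10*100*4 = 4000, so the page would move. */
	printf("%d\n", prefer_dst_node(src, dst));
	return 0;
}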
2058 struct numa_group *ng;
2107 ng = deref_curr_numa_group(p);
2108 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
2143 if (ng) {
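The matches at 2058-2143 are from the task migration path: the candidate search is widened to every node when no usable CPU was found on the preferred node or when the task's group is active on more than one node, and the if (ng) block at 2143 then derives the new preferred node from the best CPU found. A compressed sketch of the widening condition only; the names are illustrative:

#include <stdio.h>
#include <stdbool.h>

struct group_model {
	int active_nodes;
};

/* Search every node if the preferred node gave no usable CPU, or if
 * the task's group is already spread over more than one active node. */
static bool widen_node_search(int best_cpu, const struct group_model *ng)
{
	return best_cpu == -1 || (ng && ng->active_nodes > 1);
}

int main(void)
{
	struct group_model ng = { .active_nodes = 2 };
	printf("%d\n", widen_node_search(5, &ng));	/* 1: group spans nodes */
	return 0;
}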
2455 struct numa_group *ng;
2473 ng = deref_curr_numa_group(p);
2474 if (ng) {
2475 group_lock = &ng->lock;
2516 if (ng) {
2524 ng->faults[mem_idx] += diff;
2525 ng->faults_cpu[mem_idx] += f_diff;
2526 ng->total_faults += diff;
2527 group_faults += ng->faults[mem_idx];
2531 if (!ng) {
2542 if (ng) {
2543 numa_group_count_active_nodes(ng);
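task_numa_placement() (matches at 2455-2543) takes ng->lock when the task is grouped, folds the task's per-node fault deltas into the shared group counters at 2524-2527, and recounts the group's active nodes at 2543. A userspace model of just the accumulation step; locking and the decay that produces diff/f_diff are left out, and the field names are stand-ins:

#include <stdio.h>

#define NR_NODES 4

struct group_model {
	unsigned long faults[NR_NODES];     /* per-node memory faults  */
	unsigned long faults_cpu[NR_NODES]; /* per-node CPU faults     */
	unsigned long total_faults;         /* sum of faults[] entries */
};

/* Mirrors the accumulation in task_numa_placement(): the task's fault
 * delta for one node is added to the group's counters so the running
 * total stays consistent with the per-node array. */
static void group_account_faults(struct group_model *ng, int node,
				 long diff, long f_diff)
{
	ng->faults[node] += diff;
	ng->faults_cpu[node] += f_diff;
	ng->total_faults += diff;
}

int main(void)
{
	struct group_model ng = { 0 };

	group_account_faults(&ng, 2, 16, 4);
	printf("node 2: %lu, total: %lu\n", ng.faults[2], ng.total_faults);
	return 0;
}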
2736 struct numa_group *ng;
2780 ng = deref_curr_numa_group(p);
2781 if (!priv && !local && ng && ng->active_nodes > 1 && numa_is_active_node(cpu_node, ng) &&
2782 numa_is_active_node(mem_node, ng)) {
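The check at 2780-2782, in task_numa_fault(), upgrades a shared remote fault to local when the group spans more than one active node and both the faulting CPU's node and the page's node are in the group's active set. A sketch of that classification, reusing the assumed ACTIVE_NODE_FRACTION value from above:

#include <stdio.h>
#include <stdbool.h>

#define ACTIVE_NODE_FRACTION 3	/* assumed, as above */

struct group_model {
	int active_nodes;
	unsigned long faults_cpu[4];
	unsigned long max_faults_cpu;
};

static bool node_is_active(const struct group_model *ng, int nid)
{
	return ng->faults_cpu[nid] * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* For a multi-node group, a shared remote fault still counts as local
 * if both the CPU's node and the memory's node are in the active set. */
static bool treat_as_local(const struct group_model *ng, bool priv,
			   bool local, int cpu_node, int mem_node)
{
	if (local || priv)
		return local;
	return ng->active_nodes > 1 &&
	       node_is_active(ng, cpu_node) &&
	       node_is_active(ng, mem_node);
}

int main(void)
{
	struct group_model ng = {
		.active_nodes = 2,
		.faults_cpu = { 90, 80, 5, 0 },
		.max_faults_cpu = 90,
	};

	printf("%d\n", treat_as_local(&ng, false, false, 0, 1));
	return 0;
}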
12131 struct numa_group *ng;
12134 ng = rcu_dereference(p->numa_group);
12141 if (ng) {
12142 gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)], gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
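The matches at 12131-12142 are from show_numa_stats(): for each node the group's shared (slot 0) and private (slot 1) fault counters are read next to the task's own and printed. A minimal sketch of that per-node readout; the output format here is invented, not the kernel's:

#include <stdio.h>

#define NR_NODES 2

/* Stand-in layout: [node][0] = shared faults, [node][1] = private faults. */
static unsigned long group_faults[NR_NODES][2] = {
	{ 12, 34 },
	{  5,  7 },
};

int main(void)
{
	for (int node = 0; node < NR_NODES; node++) {
		unsigned long gsf = group_faults[node][0];
		unsigned long gpf = group_faults[node][1];

		printf("node %d: group_shared=%lu group_private=%lu\n",
		       node, gsf, gpf);
	}
	return 0;
}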