Lines Matching defs:node

53  * Nodes 0 & 1 are CPU + DRAM nodes, nodes 2 & 3 are PMEM nodes.
55 * node distances:
56 * node 0 1 2 3
72 * Nodes 0 & 1 are CPU + DRAM nodes, node 2 is a memory-only DRAM node.
74 * node distances:
75 * node 0 1 2
88 * Node 0 is a CPU + DRAM node, node 1 is an HBM node, node 2 is a PMEM node.
90 * node distances:
91 * node 0 1 2
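The three example comments above describe topologies (CPU + DRAM vs. PMEM/HBM nodes) whose full distance matrices are elided by this line match. As a rough, standalone illustration, the C sketch below models an example-1 style layout with made-up distance values; the matrix contents and all demo_ names are assumptions, not taken from the source.

    #include <stdio.h>

    #define NR_NODES 4

    /* Assumed SLIT-style distances: nodes 0/1 are CPU + DRAM, 2/3 are PMEM. */
    static const int demo_node_distance[NR_NODES][NR_NODES] = {
        { 10, 20, 30, 40 },
        { 20, 10, 40, 30 },
        { 30, 40, 10, 40 },
        { 40, 30, 40, 10 },
    };

    int main(void)
    {
        int node, target;

        printf("node");
        for (target = 0; target < NR_NODES; target++)
            printf("%5d", target);
        printf("\n");

        for (node = 0; node < NR_NODES; node++) {
            printf("%4d", node);
            for (target = 0; target < NR_NODES; target++)
                printf("%5d", demo_node_distance[node][target]);
            printf("\n");
        }
        return 0;
    }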
225 static struct memory_tier *__node_get_memory_tier(int node)
229 pgdat = NODE_DATA(node);
242 bool node_is_toptier(int node)
248 pgdat = NODE_DATA(node);
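__node_get_memory_tier() and node_is_toptier() both resolve a node to its tier through NODE_DATA(node). The sketch below is a userspace stand-in, not kernel code: each node carries a tier pointer, and the top-tier check is reduced to comparing a plain integer against an assumed cut-off (the real helper works on the tier's abstract distance). All demo_ names and values are assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_tier {
        int adistance;              /* smaller == faster tier (assumed model) */
    };

    struct demo_node {
        struct demo_tier *memtier;  /* stands in for pgdat->memtier */
    };

    static struct demo_tier demo_dram_tier = { .adistance = 64 };
    static struct demo_tier demo_pmem_tier = { .adistance = 192 };
    static struct demo_node demo_nodes[4] = {
        { &demo_dram_tier }, { &demo_dram_tier },
        { &demo_pmem_tier }, { &demo_pmem_tier },
    };
    static const int demo_top_tier_adistance = 64;  /* assumed cut-off */

    static struct demo_tier *demo_node_get_memory_tier(int node)
    {
        return demo_nodes[node].memtier;
    }

    static bool demo_node_is_toptier(int node)
    {
        struct demo_tier *tier = demo_node_get_memory_tier(node);

        /* In this sketch a node without a tier is treated as top tier. */
        if (!tier)
            return true;
        return tier->adistance <= demo_top_tier_adistance;
    }

    int main(void)
    {
        int node;

        for (node = 0; node < 4; node++)
            printf("node %d toptier: %d\n", node, demo_node_is_toptier(node));
        return 0;
    }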
286 * next_demotion_node() - Get the next node in the demotion path
287 * @node: The starting node to lookup the next node
289 * Return: node id for next memory node in the demotion path hierarchy
290 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
291 * @node online or guarantee that it *continues* to be the next demotion
294 int next_demotion_node(int node)
302 nd = &node_demotion[node];
314 * target node randomly.
317 * target node, but we should introduce another variable
318 * for node_demotion[] to record last selected target node,
320 * last target node. Or introducing per-cpu data to avoid
322 * target node randomly seems better until now.
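The comment fragments above explain that when a node has several equally good demotion targets, one is picked at random instead of keeping a shared round-robin cursor. A minimal userspace model of that choice, with an assumed bitmask standing in for the preferred nodemask:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define NUMA_NO_NODE (-1)

    /* Pick one set bit of @preferred at random; NUMA_NO_NODE if the mask is empty. */
    static int demo_next_demotion_node(unsigned int preferred)
    {
        int candidates[32], nr = 0, bit;

        for (bit = 0; bit < 32; bit++)
            if (preferred & (1u << bit))
                candidates[nr++] = bit;

        if (!nr)
            return NUMA_NO_NODE;
        /*
         * A random pick spreads demoted pages across the targets without the
         * shared cursor (and cache ping-pong) round-robin would need.
         */
        return candidates[rand() % nr];
    }

    int main(void)
    {
        srand((unsigned int)time(NULL));
        /* Assume nodes 2 and 3 are the preferred targets of the current node. */
        printf("next demotion node: %d\n", demo_next_demotion_node(0x0c));
        return 0;
    }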
333 int node;
335 for_each_node_state(node, N_MEMORY) {
336 node_demotion[node].preferred = NODE_MASK_NONE;
341 memtier = __node_get_memory_tier(node);
363 int target = NUMA_NO_NODE, node;
374 for_each_node_state(node, N_MEMORY) {
376 nd = &node_demotion[node];
378 memtier = __node_get_memory_tier(node);
382 * Get the lower memtier to find the demotion node list.
389 * nodelist to skip list so that we find the best node from the
395 * Find all the nodes in the memory tier node list with the same best distance.
400 target = find_next_best_node(node, &tier_nodes);
404 distance = node_distance(node, target);
417 * if any node that is part of the memory tier has CPUs.
435 * Now build the lower_tier mask for each node, collecting the node mask from
438 * preferred node.
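The establish_demotion_targets() fragments describe the core selection: for each memory node, look only at the next lower tier's node list, find the best (closest) node there, and record every node at that same best distance as a preferred target. A self-contained model of that selection over an assumed distance matrix and an assumed lower-tier bitmask:

    #include <limits.h>
    #include <stdio.h>

    #define NR_NODES 4

    static const int demo_distance[NR_NODES][NR_NODES] = {
        { 10, 20, 30, 40 },
        { 20, 10, 40, 30 },
        { 30, 40, 10, 40 },
        { 40, 30, 40, 10 },
    };

    /* Collect every lower-tier node sitting at the best (smallest) distance. */
    static unsigned int demo_preferred_mask(int node, unsigned int lower_tier_mask)
    {
        int target, best = INT_MAX;
        unsigned int preferred = 0;

        for (target = 0; target < NR_NODES; target++) {
            if (!(lower_tier_mask & (1u << target)))
                continue;
            if (demo_distance[node][target] < best) {
                best = demo_distance[node][target];
                preferred = 0;          /* closer node found: restart the set */
            }
            if (demo_distance[node][target] == best)
                preferred |= 1u << target;
        }
        return preferred;
    }

    int main(void)
    {
        /* Assume nodes 2 and 3 form the lower (e.g. PMEM) tier. */
        printf("node 0 -> preferred mask 0x%x\n", demo_preferred_mask(0, 0xc));
        printf("node 1 -> preferred mask 0x%x\n", demo_preferred_mask(1, 0xc));
        return 0;
    }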
457 static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype)
459 if (!node_memory_types[node].memtype)
460 node_memory_types[node].memtype = memtype;
462 * for each device added to the same NUMA node
465 * changing a node memtype can be done by dropping the
469 if (node_memory_types[node].memtype == memtype) {
470 if (!node_memory_types[node].map_count++)
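__init_node_memory_type() records a memory_dev_type for a node only when none is set yet, and counts further matching devices through map_count so the type can later be released by dropping that count. A small userspace model of the same first-wins plus refcount pattern (demo_ names are assumptions):

    #include <stdio.h>

    struct demo_memtype { const char *name; };

    struct demo_node_memtype {
        struct demo_memtype *memtype;
        int map_count;
    };

    static struct demo_node_memtype demo_node_types[4];

    static void demo_init_node_memory_type(int node, struct demo_memtype *memtype)
    {
        if (!demo_node_types[node].memtype)
            demo_node_types[node].memtype = memtype;
        /* Only devices whose type matches the recorded one bump the count. */
        if (demo_node_types[node].memtype == memtype)
            demo_node_types[node].map_count++;
    }

    int main(void)
    {
        struct demo_memtype dram = { "dram" };

        demo_init_node_memory_type(0, &dram);   /* first device sets the type */
        demo_init_node_memory_type(0, &dram);   /* second device only bumps the count */
        printf("node 0: type %s, map_count %d\n",
               demo_node_types[0].memtype->name, demo_node_types[0].map_count);
        return 0;
    }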
475 static struct memory_tier *set_node_memory_tier(int node)
479 pg_data_t *pgdat = NODE_DATA(node);
484 if (!node_state(node, N_MEMORY))
487 __init_node_memory_type(node, default_dram_type);
489 memtype = node_memory_types[node].memtype;
490 node_set(node, memtype->nodes);
503 static bool clear_node_memory_tier(int node)
509 pgdat = NODE_DATA(node);
521 memtier = __node_get_memory_tier(node);
527 memtype = node_memory_types[node].memtype;
528 node_clear(node, memtype->nodes);
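set_node_memory_tier() skips memoryless nodes, falls back to the default DRAM type when no driver registered one, and sets the node in that type's nodemask; clear_node_memory_tier() clears that bit again when the node leaves its tier. The simplified pairing below models only that nodemask bookkeeping; the real functions also attach or detach the node's pgdat to a memory_tier, which the sketch leaves out, and every demo_ name is an assumption.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_memtype {
        const char *name;
        unsigned int nodes;             /* bitmask of nodes using this type */
    };

    static struct demo_memtype demo_default_dram = { "default_dram", 0 };
    static struct demo_memtype *demo_node_type[4];
    static bool demo_node_has_memory[4] = { true, true, true, false };

    static struct demo_memtype *demo_set_node_memory_tier(int node)
    {
        struct demo_memtype *memtype;

        if (!demo_node_has_memory[node])    /* memoryless nodes get no tier */
            return NULL;
        if (!demo_node_type[node])          /* no registered type: use default DRAM */
            demo_node_type[node] = &demo_default_dram;
        memtype = demo_node_type[node];
        memtype->nodes |= 1u << node;
        return memtype;
    }

    static bool demo_clear_node_memory_tier(int node)
    {
        struct demo_memtype *memtype = demo_node_type[node];

        if (!memtype)
            return false;
        memtype->nodes &= ~(1u << node);    /* drop the node from the type's mask */
        return true;
    }

    int main(void)
    {
        demo_set_node_memory_tier(0);
        demo_set_node_memory_tier(2);
        printf("dram nodes: 0x%x\n", demo_default_dram.nodes);
        demo_clear_node_memory_tier(2);
        printf("dram nodes after clear: 0x%x\n", demo_default_dram.nodes);
        return 0;
    }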
569 void init_node_memory_type(int node, struct memory_dev_type *memtype)
573 __init_node_memory_type(node, memtype);
578 void clear_node_memory_type(int node, struct memory_dev_type *memtype)
581 if (node_memory_types[node].memtype == memtype)
582 node_memory_types[node].map_count--;
584 * If we unmapped all the devices attached to this node,
585 * clear the node memory type.
587 if (!node_memory_types[node].map_count) {
588 node_memory_types[node].memtype = NULL;
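clear_node_memory_type() is the teardown side of the map_count refcounting above: each detached device drops the count, and only when it reaches zero is the node's memtype pointer cleared. A self-contained model (demo_ names assumed):

    #include <stdio.h>

    struct demo_memtype { const char *name; };

    struct demo_node_memtype {
        struct demo_memtype *memtype;
        int map_count;
    };

    static struct demo_node_memtype demo_node_types[4];

    static void demo_clear_node_memory_type(int node, struct demo_memtype *memtype)
    {
        if (demo_node_types[node].memtype == memtype)
            demo_node_types[node].map_count--;
        /* Only forget the type once every attached device has been unmapped. */
        if (!demo_node_types[node].map_count)
            demo_node_types[node].memtype = NULL;
    }

    int main(void)
    {
        struct demo_memtype pmem = { "pmem" };

        demo_node_types[1].memtype = &pmem;
        demo_node_types[1].map_count = 2;       /* two devices attached (assumed) */

        demo_clear_node_memory_type(1, &pmem);
        printf("after 1st clear: count %d\n", demo_node_types[1].map_count);
        demo_clear_node_memory_type(1, &pmem);
        printf("after 2nd clear: type %s\n",
               demo_node_types[1].memtype ? demo_node_types[1].memtype->name
                                          : "(none)");
        return 0;
    }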
602 * Only update the node migration order when a node is
629 int ret, node;
655 for_each_node_state(node, N_MEMORY) {
656 memtier = set_node_memory_tier(node);
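memory_tier_init() ends with a pass over every node that currently has memory, giving each one a tier via set_node_memory_tier(). The loop below only models the shape of that boot-time pass over an assumed set of populated nodes; the kernel iterates with for_each_node_state(node, N_MEMORY), and all demo_ names are assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_NODES 4

    static bool demo_node_has_memory[NR_NODES] = { true, true, false, true };

    /* Stand-in for set_node_memory_tier(): report which nodes would get a tier. */
    static bool demo_set_node_memory_tier(int node)
    {
        return demo_node_has_memory[node];
    }

    int main(void)
    {
        int node;

        /* Boot-time pass: give every populated node a memory tier. */
        for (node = 0; node < NR_NODES; node++) {
            if (!demo_node_has_memory[node])
                continue;
            if (demo_set_node_memory_tier(node))
                printf("node %d placed in a tier\n", node);
        }
        return 0;
    }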