/kernel/linux/linux-6.6/fs/btrfs/
  ref-verify.c
    440  struct extent_buffer *leaf = path->nodes[0];  in process_extent_item()
    505  struct extent_buffer *leaf = path->nodes[0];  in process_leaf()
    564  eb = btrfs_read_node_slot(path->nodes[level],  in walk_down_tree()
    569  path->nodes[level-1] = eb;  in walk_down_tree()
    589  if (!path->nodes[l])  in walk_up_tree()
    594  btrfs_header_nritems(path->nodes[l])) {  in walk_up_tree()
    599  btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);  in walk_up_tree()
    600  free_extent_buffer(path->nodes[l]);  in walk_up_tree()
    601  path->nodes[l] = NULL;  in walk_up_tree()
    999  path->nodes[leve  in btrfs_build_ref_tree()
    [all...]
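The walk_down_tree()/walk_up_tree() hits above show the usual btrfs pattern of keeping one buffer pointer per level in path->nodes[] while doing an iterative depth-first walk. A stripped-down user-space model of that "explicit path" traversal (simplified node type, no locking or I/O; not the btrfs code itself) could look like:

#include <stdio.h>

#define MAX_LEVEL 8

struct node {
    int level;                 /* 0 == leaf */
    int nritems;
    struct node *child[4];     /* only used when level > 0 */
};

struct path {
    struct node *nodes[MAX_LEVEL];
    int slots[MAX_LEVEL];
};

/* Visit every leaf by walking down the leftmost unvisited branch, then back up. */
static void walk(struct node *root)
{
    struct path p = { { 0 } };
    int level = root->level;

    p.nodes[level] = root;
    while (level <= root->level) {
        struct node *n = p.nodes[level];

        if (level == 0) {
            printf("leaf with %d items\n", n->nritems);
            p.nodes[level] = NULL;          /* done with this leaf */
            level++;                        /* walk back up */
        } else if (p.slots[level] < n->nritems) {
            struct node *child = n->child[p.slots[level]++];
            level--;
            p.nodes[level] = child;         /* walk down one level */
            p.slots[level] = 0;
        } else {
            p.nodes[level] = NULL;          /* this node is exhausted */
            level++;
        }
    }
}

int main(void)
{
    struct node leaf1 = { 0, 3, { 0 } }, leaf2 = { 0, 5, { 0 } };
    struct node root = { 1, 2, { &leaf1, &leaf2 } };

    walk(&root);
    return 0;
}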
/kernel/linux/linux-6.6/include/linux/
  mmzone.h
    1276  * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
    1302  * node_zonelists contains references to all zones in all nodes.
    1614  * for_each_online_pgdat - helper macro to iterate over all online nodes
    1658  nodemask_t *nodes);
    1664  * @nodes: An optional nodemask to filter the zonelist with
    1677  nodemask_t *nodes)  in next_zones_zonelist()
    1679  if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))  in next_zones_zonelist()
    1681  return __next_zones_zonelist(z, highest_zoneidx, nodes);  in next_zones_zonelist()
    1688  * @nodes: An optional nodemask to filter the zonelist with
    1703  nodemask_t *nodes)  in first_zones_zonelist()
    1675  next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, nodemask_t *nodes)  next_zones_zonelist() argument
    1701  first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, nodemask_t *nodes)  first_zones_zonelist() argument
    1746  movable_only_nodes(nodemask_t *nodes)  movable_only_nodes() argument
    [all...]
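The first_zones_zonelist()/next_zones_zonelist() hits above implement a filtered walk over a flattened zonelist: entries above the requested zone index, or on nodes outside an optional nodemask, are skipped. A minimal user-space model of that filtering (simplified types, with a boolean array standing in for nodemask_t; not the kernel implementation) might look like:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's zoneref/zonelist/nodemask_t types. */
struct zoneref { int node; int zone_idx; };

static bool node_allowed(const bool *nodemask, int node)
{
    return nodemask == NULL || nodemask[node];   /* no mask means "any node" */
}

/* Return the first zonelist entry at or below highest_zoneidx on an allowed node. */
static const struct zoneref *first_zones(const struct zoneref *z, int n,
                                         int highest_zoneidx, const bool *nodemask)
{
    for (int i = 0; i < n; i++)
        if (z[i].zone_idx <= highest_zoneidx && node_allowed(nodemask, z[i].node))
            return &z[i];
    return NULL;
}

int main(void)
{
    /* Two nodes, each contributing zone indices 2..0 in fallback order. */
    const struct zoneref zl[] = { {0,2}, {0,1}, {0,0}, {1,2}, {1,1}, {1,0} };
    const bool only_node1[2] = { false, true };

    const struct zoneref *z = first_zones(zl, 6, 1, only_node1);
    if (z)
        printf("first usable zone: node %d, zone index %d\n", z->node, z->zone_idx);
    return 0;
}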
/kernel/linux/linux-6.6/drivers/interconnect/
  core.c
    55   list_for_each_entry(n, &provider->nodes, node_list) {  in icc_summary_show()
    117  /* draw nodes */  in icc_graph_show()
    118  list_for_each_entry(n, &provider->nodes, node_list)  in icc_graph_show()
    122  list_for_each_entry(n, &provider->nodes, node_list)  in icc_graph_show()
    133  list_for_each_entry(n, &provider->nodes, node_list)  in icc_graph_show()
    157  list_for_each_entry(n, &provider->nodes, node_list) {  in node_find_by_name()
    342  * multiple interconnect nodes. A single cell is used as an index into
    343  * an array of icc nodes specified in the icc_onecell_data struct when
    357  return icc_data->nodes[idx];  in of_icc_xlate_onecell()
    880  * icc_link_create() - create a link between two nodes
    [all...]
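The of_icc_xlate_onecell() hits describe the "one cell" translation scheme: the single cell in the device-tree specifier is used directly as an index into the provider's node array. A small stand-alone sketch of that lookup (simplified types, NULL instead of ERR_PTR; not the kernel helper itself):

#include <stdio.h>

struct icc_node_stub { const char *name; };     /* stand-in for struct icc_node */

/* Simplified analogue of icc_onecell_data: a node count plus the node array. */
struct onecell_data {
    unsigned int num_nodes;
    struct icc_node_stub **nodes;
};

/* The single DT cell is used directly as an index into the provider's array. */
static struct icc_node_stub *xlate_onecell(const struct onecell_data *data,
                                           unsigned int idx)
{
    if (idx >= data->num_nodes)
        return NULL;            /* the kernel helper rejects out-of-range indices */
    return data->nodes[idx];
}

int main(void)
{
    struct icc_node_stub a = { "noc_master" }, b = { "noc_slave" };
    struct icc_node_stub *arr[] = { &a, &b };
    const struct onecell_data data = { 2, arr };

    struct icc_node_stub *n = xlate_onecell(&data, 1);
    printf("%s\n", n ? n->name : "out of range");
    return 0;
}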
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/include/nvkm/core/
  mm.h
    21  struct list_head nodes;  member
    48  list_for_each_entry(node, &mm->nodes, nl_entry) {  in nvkm_mm_heap_size()
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/include/nvkm/core/
  mm.h
    21  struct list_head nodes;  member
    48  list_for_each_entry(node, &mm->nodes, nl_entry) {  in nvkm_mm_heap_size()
/kernel/linux/linux-6.6/include/trace/events/
  bcache.h
    298  TP_PROTO(unsigned nodes),
    299  TP_ARGS(nodes),
    302  __field(unsigned, nodes )
    306  __entry->nodes = nodes;
    309  TP_printk("coalesced %u nodes", __entry->nodes)
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
  amdgpu_psp.c
    1348  if (mirror_top_info->nodes[j].node_id != src_node_id)  in psp_xgmi_reflect_topology_info()
    1351  mirror_top_info->nodes[j].num_hops = dst_num_hops;  in psp_xgmi_reflect_topology_info()
    1358  mirror_top_info->nodes[j].num_links = dst_num_links;  in psp_xgmi_reflect_topology_info()
    1393  topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;  in psp_xgmi_get_topology_info()
    1394  topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;  in psp_xgmi_get_topology_info()
    1395  topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;  in psp_xgmi_get_topology_info()
    1396  topology_info_input->nodes[  in psp_xgmi_get_topology_info()
    [all...]
/kernel/linux/linux-5.10/drivers/base/
  swnode.c
    3    * Software nodes for the firmware node framework.
    695  * software_node_register_nodes - Register an array of software nodes
    696  * @nodes: Zero terminated array of software nodes to be registered
    698  * Register multiple software nodes at once.
    700  int software_node_register_nodes(const struct software_node *nodes)  in software_node_register_nodes() argument
    705  for (i = 0; nodes[i].name; i++) {  in software_node_register_nodes()
    706  ret = software_node_register(&nodes[i]);  in software_node_register_nodes()
    708  software_node_unregister_nodes(nodes);  in software_node_register_nodes()
    718  * software_node_unregister_nodes - Unregister an array of software nodes
    728  software_node_unregister_nodes(const struct software_node *nodes)  software_node_unregister_nodes() argument
    [all...]
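The swnode.c hits show the usual register-or-roll-back pattern: walk a sentinel-terminated array, register each entry, and undo everything already registered if one entry fails. A compact user-space sketch of that pattern (stub types and print statements in place of the real software_node_register()/unregister calls):

#include <stdio.h>

struct sw_node { const char *name; };   /* stand-in for struct software_node */

static int register_one(const struct sw_node *n)
{
    printf("register %s\n", n->name);
    return 0;                            /* 0 on success, negative on error */
}

static void unregister_one(const struct sw_node *n)
{
    printf("unregister %s\n", n->name);
}

/* Register a NULL-name-terminated array; on failure, unwind what was done. */
static int register_nodes(const struct sw_node *nodes)
{
    int i, ret;

    for (i = 0; nodes[i].name; i++) {
        ret = register_one(&nodes[i]);
        if (ret) {
            while (--i >= 0)
                unregister_one(&nodes[i]);
            return ret;
        }
    }
    return 0;
}

int main(void)
{
    const struct sw_node nodes[] = { { "a" }, { "b" }, { NULL } };
    return register_nodes(nodes);
}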
/kernel/linux/linux-5.10/fs/ubifs/
  gc.c
    14  * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
    15  * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
    16  * nodes to the journal, at which point the garbage-collected LEB is free to be
    17  * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
    19  * to be reused. Garbage collection will cause the number of dirty index nodes
    33  * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
    34  * if GC move data from LEB A to LEB B and nodes in LEB A are large, GC would
    35  * have to waste large pieces of free space at the end of LEB B, because nodes
    36  * from LEB A would not fit. And the worst situation is when all nodes ar
    [all...]
/kernel/linux/linux-6.6/fs/ubifs/
  gc.c
    14  * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
    15  * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
    16  * nodes to the journal, at which point the garbage-collected LEB is free to be
    17  * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
    19  * to be reused. Garbage collection will cause the number of dirty index nodes
    33  * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
    34  * if GC move data from LEB A to LEB B and nodes in LEB A are large, GC would
    35  * have to waste large pieces of free space at the end of LEB B, because nodes
    36  * from LEB A would not fit. And the worst situation is when all nodes ar
    [all...]
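The gc.c comment excerpts above argue that larger UBIFS nodes make GC waste more space, because whatever does not fit at the end of the destination LEB is lost. A toy calculation illustrating that point (fixed-size nodes and a made-up LEB size; real UBIFS nodes vary in size):

#include <stdio.h>

int main(void)
{
    const unsigned leb_size = 128 * 1024;            /* hypothetical LEB size */
    const unsigned node_sizes[] = { 512, 4096, 40960 };

    for (unsigned i = 0; i < 3; i++) {
        unsigned fit   = leb_size / node_sizes[i];   /* whole nodes that fit  */
        unsigned waste = leb_size % node_sizes[i];   /* unusable tail of LEB  */
        printf("node %6u bytes: %3u fit, %6u bytes wasted at LEB end\n",
               node_sizes[i], fit, waste);
    }
    return 0;
}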
/kernel/linux/linux-5.10/drivers/md/bcache/
  btree.c
    17    * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
    367   * flush, and writes appending to leaf nodes aren't blocking anything so  in do_btree_node_write()
    668   * It's _really_ critical that we don't free too many btree nodes - we  in bch_mca_scan()
    883   * We can only have one thread cannibalizing other cached btree nodes at a time,
    911   * the list. Check if there's any freed nodes there:  in mca_alloc()
    1210  * ptr_invalid() can't return true for the keys that mark btree nodes as  in __bch_btree_mark_key()
    1289  gc->nodes++;  in btree_gc_mark_node()
    1338  unsigned int i, nodes = 0, keys = 0, blocks;  in btree_gc_coalesce() local
    1352  while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes]  in btree_gc_coalesce()
    [all...]
/kernel/linux/linux-6.6/drivers/md/bcache/
  btree.c
    17    * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
    367   * flush, and writes appending to leaf nodes aren't blocking anything so  in do_btree_node_write()
    689   * It's _really_ critical that we don't free too many btree nodes - we  in bch_mca_scan()
    904   * We can only have one thread cannibalizing other cached btree nodes at a time,
    932   * the list. Check if there's any freed nodes there:  in mca_alloc()
    1231  * ptr_invalid() can't return true for the keys that mark btree nodes as  in __bch_btree_mark_key()
    1310  gc->nodes++;  in btree_gc_mark_node()
    1359  unsigned int i, nodes = 0, keys = 0, blocks;  in btree_gc_coalesce() local
    1373  while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes]  in btree_gc_coalesce()
    [all...]
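btree_gc_coalesce() in the hits above gathers up to GC_MERGE_NODES adjacent, lightly filled btree nodes and rewrites them as fewer nodes. The sketch below models only the merge decision, not bcache's actual block accounting: it assumes a fixed per-node key capacity (a simplification) and coalesces when the combined keys fit in one fewer node.

#include <stdbool.h>
#include <stdio.h>

#define GC_MERGE_NODES 4     /* cap on how many nodes one pass considers (illustrative) */
#define KEYS_PER_NODE  100   /* hypothetical per-node capacity */

/* Coalesce only if the combined keys fit in one fewer node than we have. */
static bool should_coalesce(const unsigned keys[], unsigned nodes)
{
    unsigned total = 0;

    if (nodes < 2 || nodes > GC_MERGE_NODES)
        return false;
    for (unsigned i = 0; i < nodes; i++)
        total += keys[i];
    return total <= (nodes - 1) * KEYS_PER_NODE;
}

int main(void)
{
    unsigned sparse[] = { 20, 35, 10, 25 };   /* 90 keys -> fits in fewer nodes */
    unsigned dense[]  = { 90, 95, 85, 80 };   /* 350 keys -> keep all four      */

    printf("sparse: %s\n", should_coalesce(sparse, 4) ? "coalesce" : "keep");
    printf("dense:  %s\n", should_coalesce(dense, 4) ? "coalesce" : "keep");
    return 0;
}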
/kernel/linux/linux-5.10/arch/x86/kernel/apic/
  apic_numachip.c
    173  u32 nodes = 1;  in fixup_cpu_id() local
    177  /* Account for nodes per socket in multi-core-module processors */  in fixup_cpu_id()
    180  nodes = ((val >> 3) & 7) + 1;  in fixup_cpu_id()
    183  c->phys_proc_id = node / nodes;  in fixup_cpu_id()
/kernel/linux/linux-6.6/arch/x86/kernel/apic/
  apic_numachip.c
    162  u32 nodes = 1;  in fixup_cpu_id() local
    166  /* Account for nodes per socket in multi-core-module processors */  in fixup_cpu_id()
    169  nodes = ((val >> 3) & 7) + 1;  in fixup_cpu_id()
    172  c->phys_proc_id = node / nodes;  in fixup_cpu_id()
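The fixup_cpu_id() hits decode a 3-bit "nodes per socket" field from bits 5:3 of a register value and then map a node id to a physical package id by integer division. A quick standalone check of that arithmetic (the register value and node id below are made-up examples):

#include <stdio.h>

int main(void)
{
    unsigned int val  = 0x18;                    /* example register value: bits 5:3 = 3 */
    unsigned int node = 7;                       /* example NUMA node id                 */

    unsigned int nodes = ((val >> 3) & 7) + 1;   /* 3 + 1 = 4 nodes per socket */
    unsigned int phys_proc_id = node / nodes;    /* node 7 -> socket 1         */

    printf("nodes per socket: %u, node %u -> socket %u\n", nodes, node, phys_proc_id);
    return 0;
}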
/kernel/linux/linux-6.6/drivers/interconnect/imx/
  imx8mp.c
    185  static struct imx_icc_node_desc nodes[] = {  variable
    239  return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes);  in imx8mp_icc_probe()
/kernel/linux/linux-5.10/fs/btrfs/
  ref-verify.c
    437  struct extent_buffer *leaf = path->nodes[0];  in process_extent_item()
    501  struct extent_buffer *leaf = path->nodes[0];  in process_leaf()
    563  block_bytenr = btrfs_node_blockptr(path->nodes[level],  in walk_down_tree()
    565  gen = btrfs_node_ptr_generation(path->nodes[level],  in walk_down_tree()
    567  btrfs_node_key_to_cpu(path->nodes[level], &first_key,  in walk_down_tree()
    579  path->nodes[level-1] = eb;  in walk_down_tree()
    598  if (!path->nodes[l])  in walk_up_tree()
    603  btrfs_header_nritems(path->nodes[l])) {  in walk_up_tree()
    608  btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);  in walk_up_tree()
    609  free_extent_buffer(path->nodes[  in walk_up_tree()
    [all...]
  delayed-inode.h
    27  * Used for delayed nodes which is waiting to be dealt with by the
    34  int nodes;  /* for delayed nodes */  member
    49  * Used to add the node into the prepare list, the nodes in this list
    81  delayed_root->nodes = 0;  in btrfs_init_delayed_root()
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/
  kfd_device.c
    581  knode = kfd->nodes[i];  in kfd_cleanup_nodes()
    588  kfd->nodes[i] = NULL;  in kfd_cleanup_nodes()
    649  "KFD num nodes cannot be 0, num_xcc_in_node: %d\n",  in kgd2kfd_device_init()
    758  dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",  in kgd2kfd_device_init()
    761  /* Allocate the KFD nodes */  in kgd2kfd_device_init()
    825  kfd->nodes[i] = node;  in kgd2kfd_device_init()
    860  /* Cleanup KFD nodes */  in kgd2kfd_device_exit()
    881  node = kfd->nodes[i];  in kgd2kfd_pre_reset()
    889  kfd_signal_reset_event(kfd->nodes[i]);  in kgd2kfd_pre_reset()
    910  ret = kfd_resume(kfd->nodes[  in kgd2kfd_post_reset()
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
  amdgpu_psp.c
    866  topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;  in psp_xgmi_get_topology_info()
    867  topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;  in psp_xgmi_get_topology_info()
    868  topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;  in psp_xgmi_get_topology_info()
    869  topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;  in psp_xgmi_get_topology_info()
    881  topology->nodes[i].node_id = topology_info_output->nodes[  in psp_xgmi_get_topology_info()
    [all...]
/kernel/linux/linux-5.10/drivers/base/regmap/
  regcache-rbtree.c
    140  int nodes = 0;  in rbtree_show() local
    159  nodes++;  in rbtree_show()
    163  if (nodes)  in rbtree_show()
    164  average = registers / nodes;  in rbtree_show()
    168  seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",  in rbtree_show()
    169  nodes, registers, average, mem_size);  in rbtree_show()
/kernel/linux/linux-6.6/drivers/base/regmap/
  regcache-rbtree.c
    140  int nodes = 0;  in rbtree_show() local
    159  nodes++;  in rbtree_show()
    163  if (nodes)  in rbtree_show()
    164  average = registers / nodes;  in rbtree_show()
    168  seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",  in rbtree_show()
    169  nodes, registers, average, mem_size);  in rbtree_show()
/kernel/linux/linux-6.6/mm/
  workingset.c
    658  * create excessive amounts of shadow nodes. To keep a lid on this,
    659  * track shadow nodes and reclaim them when they grow way past the
    670  * Track non-empty nodes that contain only shadow entries;  in workingset_update_node()
    673  * Avoid acquiring the list_lru lock when the nodes are  in workingset_update_node()
    697  unsigned long nodes;  in count_shadow_nodes() local
    700  nodes = list_lru_shrink_count(&shadow_nodes, sc);  in count_shadow_nodes()
    701  if (!nodes)  in count_shadow_nodes()
    705  * Approximate a reasonable limit for the nodes  in count_shadow_nodes()
    748  if (nodes <= max_nodes)  in count_shadow_nodes()
    750  return nodes  in count_shadow_nodes()
    [all...]
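count_shadow_nodes() above reports reclaim pressure only once the number of tracked shadow nodes exceeds an approximate limit. A toy version of that count-callback logic (arbitrary numbers, none of the kernel's limit heuristics):

#include <stdio.h>

/* Report how many objects a shrinker should try to free: nothing until the
 * tracked count passes the limit, then only the excess above the limit. */
static unsigned long count_shadow(unsigned long nodes, unsigned long max_nodes)
{
    if (nodes <= max_nodes)
        return 0;
    return nodes - max_nodes;
}

int main(void)
{
    printf("%lu\n", count_shadow(900, 1000));    /* under the limit -> 0   */
    printf("%lu\n", count_shadow(1500, 1000));   /* over the limit  -> 500 */
    return 0;
}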
/kernel/linux/linux-6.6/Documentation/sphinx/
  kernel_feat.py
    40   from docutils import nodes, statemachine  namespace
    117  node = nodes.section()
  kernel_abi.py
    42  from docutils import nodes, statemachine  namespace
    97  node = nodes.section()
/kernel/linux/linux-5.10/drivers/md/persistent-data/
  dm-btree-internal.h
    67  struct dm_block *nodes[2];  member
    80  struct dm_block *nodes[2];  member