| /kernel/linux/linux-5.10/fs/btrfs/ |
| H A D | dir-item.c | 43 leaf = path->nodes[0]; in insert_with_overflow() 83 leaf = path->nodes[0]; in btrfs_insert_xattr_item() 95 btrfs_mark_buffer_dirty(path->nodes[0]); in btrfs_insert_xattr_item() 144 leaf = path->nodes[0]; in btrfs_insert_dir_item() 249 leaf = path->nodes[0]; in btrfs_check_dir_item_collision() 314 leaf = path->nodes[0]; in btrfs_search_dir_index_item() 324 leaf = path->nodes[0]; in btrfs_search_dir_index_item() 382 leaf = path->nodes[0]; in btrfs_match_dir_item_name() 418 leaf = path->nodes[0]; in btrfs_delete_one_dir_name()
|
| H A D | free-space-tree.c | 68 leaf = path->nodes[0]; in add_new_free_space_info() 106 return btrfs_item_ptr(path->nodes[0], path->slots[0], in search_free_space_info() 223 leaf = path->nodes[0]; in convert_free_space_to_bitmaps() 266 leaf = path->nodes[0]; in convert_free_space_to_bitmaps() 305 leaf = path->nodes[0]; in convert_free_space_to_bitmaps() 362 leaf = path->nodes[0]; in convert_free_space_to_extents() 411 leaf = path->nodes[0]; in convert_free_space_to_extents() 476 flags = btrfs_free_space_flags(path->nodes[0], info); in update_free_space_extent_count() 477 extent_count = btrfs_free_space_extent_count(path->nodes[0], info); in update_free_space_extent_count() 480 btrfs_set_free_space_extent_count(path->nodes[ in update_free_space_extent_count() [all...] |
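Every btrfs hit in this group revolves around the same access pattern: after a successful btrfs_search_slot(), path->nodes[0] is the leaf extent buffer and path->slots[0] the matching slot inside it, and item fields are then read through btrfs_item_ptr() on that pair. A minimal sketch of the pattern follows; the helper name lookup_inode_size() is hypothetical, and everything around the search/read calls (error handling, headers) is an assumption for illustration, not the kernel's actual code.

/* Sketch only: read the on-disk size of an inode item the way the
 * hits above do it. Assumes an in-kernel btrfs context ("ctree.h"). */
static int lookup_inode_size(struct btrfs_root *root, u64 ino, u64 *size)
{
    struct btrfs_path *path;
    struct btrfs_inode_item *ii;
    struct btrfs_key key;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    key.objectid = ino;
    key.type = BTRFS_INODE_ITEM_KEY;
    key.offset = 0;

    /* No transaction, no COW: a read-only search. */
    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
    if (ret) {                      /* > 0 means not found, < 0 is an error */
        ret = ret > 0 ? -ENOENT : ret;
        goto out;
    }

    /* The leaf holding the item is always path->nodes[0]. */
    ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
                        struct btrfs_inode_item);
    *size = btrfs_inode_size(path->nodes[0], ii);
out:
    btrfs_free_path(path);
    return ret;
}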
| /kernel/linux/linux-6.6/drivers/clk/zynqmp/ |
| H A D | clkc.c | 65 * @node: Clock topology nodes 66 * @num_nodes: Number of nodes present in topology 125 const struct clock_topology *nodes) 255 * other nodes, master should call same API in loop with new 257 * index 0 which will return nodes 0,1 and 2. Next call, index 258 * should be 3 which will return nodes 3,4 and 5 and so on. 306 * @nodes: Clock topology node 313 const struct clock_topology *nodes) in zynqmp_clk_register_fixed_factor() 332 flag = zynqmp_clk_map_common_ccf_flags(nodes->flag); in zynqmp_clk_register_fixed_factor() 406 * @nnodes: Number of nodes 310 zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id, const char * const *parents, u8 num_parents, const struct clock_topology *nodes) zynqmp_clk_register_fixed_factor() argument 593 struct clock_topology *nodes; zynqmp_register_clk_topology() local [all...] |
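The comments in the clkc.c hits describe a chunked firmware query: each call returns at most three topology nodes, so the caller loops with the index advanced by three (index 0 returns nodes 0-2, index 3 returns nodes 3-5, and so on). A rough sketch of that loop, using hypothetical names (query_topology(), NODES_PER_QUERY, struct topo_node) rather than the real ZynqMP EEMI interface:

#include <stddef.h>

#define NODES_PER_QUERY 3

struct topo_node { unsigned int type, flag; };

/* Pretend firmware call: fills up to NODES_PER_QUERY entries starting at
 * @index and returns how many were filled (0 when the topology ends). */
extern int query_topology(unsigned int clk_id, unsigned int index,
                          struct topo_node out[NODES_PER_QUERY]);

static int read_clock_topology(unsigned int clk_id,
                               struct topo_node *nodes, size_t max_nodes)
{
    size_t filled = 0;

    while (filled + NODES_PER_QUERY <= max_nodes) {
        int n = query_topology(clk_id, filled, &nodes[filled]);

        if (n <= 0)
            break;              /* error or end of topology */
        filled += n;
        if (n < NODES_PER_QUERY)
            break;              /* short batch: nothing more to fetch */
    }
    return (int)filled;
}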
| /kernel/linux/linux-6.6/mm/ |
| H A D | memory-tiers.c | 23 /* All the nodes that are part of all the lower memory tiers. */ 53 * Node 0 & 1 are CPU + DRAM nodes, node 2 & 3 are PMEM nodes. 72 * Node 0 & 1 are CPU + DRAM nodes, node 2 is memory-only DRAM node. 88 * Node 0 is CPU + DRAM nodes, Node 1 is HBM node, node 2 is PMEM node. 115 nodemask_t nodes = NODE_MASK_NONE; in get_memtier_nodemask() local 119 nodes_or(nodes, nodes, memtype->nodes); in get_memtier_nodemask() 121 return nodes; in get_memtier_nodemask() [all...] |
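The memory-tiers.c hit shows how a tier's nodemask is assembled: start from NODE_MASK_NONE and OR in the nodemask of every memory device type attached to the tier. A condensed sketch, assuming the internal struct definitions from mm/memory-tiers.c; the list and link member names here are assumptions, while nodes_or(), NODE_MASK_NONE and memtype->nodes come straight from the snippet:

static nodemask_t tier_nodemask(struct memory_tier *memtier)
{
    nodemask_t nodes = NODE_MASK_NONE;
    struct memory_dev_type *memtype;

    /* memory_types/tier_sibling are assumed member names */
    list_for_each_entry(memtype, &memtier->memory_types, tier_sibling)
        nodes_or(nodes, nodes, memtype->nodes);

    return nodes;
}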
| /third_party/node/deps/cares/src/lib/ |
| H A D | ares_getaddrinfo.c | 277 status = ares_append_ai_node(AF_INET, port, 0, &addr4, &ai->nodes); in fake_addrinfo() 291 status = ares_append_ai_node(AF_INET6, port, 0, &addr6, &ai->nodes); in fake_addrinfo() 320 ai->nodes->ai_socktype = hints->ai_socktype; in fake_addrinfo() 321 ai->nodes->ai_protocol = hints->ai_protocol; in fake_addrinfo() 333 if (!(hquery->hints.ai_flags & ARES_AI_NOSORT) && hquery->ai->nodes) { in end_hquery() 334 sentinel.ai_next = hquery->ai->nodes; in end_hquery() 336 hquery->ai->nodes = sentinel.ai_next; in end_hquery() 338 next = hquery->ai->nodes; in end_hquery() 511 if (addinfostatus == ARES_EBADRESP && hquery->ai->nodes) { in host_callback() 518 } else if (hquery->ai->nodes) { in host_callback() [all...] |
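end_hquery() in the hits above uses a stack-allocated sentinel: it chains the real result list behind a dummy head, lets the sort routine rewire ai_next pointers freely, then reads the (possibly new) first node back out of the sentinel. A generic sketch of that trick; sort_by_rfc6724() and struct ai_node are hypothetical stand-ins, not c-ares API:

#include <stddef.h>

struct ai_node {
    struct ai_node *ai_next;
    int family;
};

/* Hypothetical sorter that may reorder the list in place. */
extern void sort_by_rfc6724(struct ai_node *dummy_head);

static void sort_result_nodes(struct ai_node **nodes)
{
    struct ai_node sentinel;

    if (!*nodes)
        return;
    sentinel.ai_next = *nodes;      /* dummy head in front of the list */
    sort_by_rfc6724(&sentinel);
    *nodes = sentinel.ai_next;      /* head may have moved during the sort */
}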
| /base/useriam/user_auth_framework/services/core/src/ |
| H A D | schedule_node_helper.cpp | 28 std::shared_ptr<ScheduleNodeCallback> callback, std::vector<std::shared_ptr<ScheduleNode>> &nodes) in BuildFromHdi() 31 return BuildFromHdi(infos, callback, nodes, para); in BuildFromHdi() 35 std::shared_ptr<ScheduleNodeCallback> callback, std::vector<std::shared_ptr<ScheduleNode>> &nodes, in BuildFromHdi() 49 nodes.swap(outputs); in BuildFromHdi() 27 BuildFromHdi(const std::vector<HdiScheduleInfo> &infos, std::shared_ptr<ScheduleNodeCallback> callback, std::vector<std::shared_ptr<ScheduleNode>> &nodes) BuildFromHdi() argument 34 BuildFromHdi(const std::vector<HdiScheduleInfo> &infos, std::shared_ptr<ScheduleNodeCallback> callback, std::vector<std::shared_ptr<ScheduleNode>> &nodes, const NodeOptionalPara ¶) BuildFromHdi() argument
|
| /foundation/graphic/graphic_3d/lume/scenewidgetplugin/plugin/src/ |
| H A D | hierarchy_controller.cpp | 129 BASE_NS::vector<INode::Ptr> nodes; in GetAllNodes() local 130 auto add = [&nodes](const INode::Ptr& node) { nodes.push_back(node); }; in GetAllNodes() 135 return nodes; in GetAllNodes()
|
| /kernel/linux/linux-6.6/drivers/interconnect/ |
| H A D | icc-clk.c | 87 onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL); in icc_clk_register() 103 INIT_LIST_HEAD(&provider->nodes); in icc_clk_register() 122 onecell->nodes[j++] = node; in icc_clk_register() 133 onecell->nodes[j++] = node; in icc_clk_register()
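The icc-clk.c hit allocates its icc_onecell_data with struct_size(): the struct ends in a flexible nodes[] array, so a single devm_kzalloc() covers the header plus two node pointers (a master and a slave) per clock. A minimal sketch of that pattern, assuming kernel context; the helper name alloc_onecell() is made up for illustration:

#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/overflow.h>

static struct icc_onecell_data *alloc_onecell(struct device *dev,
                                              unsigned int num_clocks)
{
    struct icc_onecell_data *onecell;

    /* header + one master and one slave node pointer per clock,
     * sized with struct_size() to avoid multiplication overflow */
    onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks),
                           GFP_KERNEL);
    if (!onecell)
        return NULL;

    onecell->num_nodes = 2 * num_clocks;
    return onecell;
}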
|
| /third_party/ninja/src/ |
| H A D | missing_deps_test.cc | 53 std::vector<Node*> nodes = state_.RootNodes(&err); in ProcessAllNodes() local 55 for (std::vector<Node*>::iterator it = nodes.begin(); it != nodes.end(); in ProcessAllNodes() 165 std::vector<Node*> nodes = state_.RootNodes(&err); in TEST_F() local |
|
| /third_party/libdrm/tests/ |
| H A D | drmdevice.c | 40 printf("+-> nodes\n"); in print_device_info() 43 printf("| +-> nodes[%d] %s\n", j, device->nodes[j]); in print_device_info() 144 printf("--- Opening device node %s ---\n", devices[i]->nodes[j]); in main() 145 fd = open(devices[i]->nodes[j], O_RDONLY | O_CLOEXEC, 0); in main() 151 printf("--- Retrieving device info, for node %s ---\n", devices[i]->nodes[j]); in main()
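The drmdevice.c hits enumerate every DRM device and then walk its per-device nodes[] array (primary/control/render paths), skipping node types the kernel did not create; available_nodes is a bitmask indexed by node type. A self-contained sketch of that loop against the public libdrm API, with error handling trimmed for brevity:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

#define MAX_DEVICES 16

static void open_all_nodes(void)
{
    drmDevicePtr devices[MAX_DEVICES];
    int count = drmGetDevices2(0, devices, MAX_DEVICES);

    if (count < 0)
        return;

    for (int i = 0; i < count; i++) {
        for (int j = 0; j < DRM_NODE_MAX; j++) {
            int fd;

            if (!(devices[i]->available_nodes & (1 << j)))
                continue;       /* node type not present */
            fd = open(devices[i]->nodes[j], O_RDONLY | O_CLOEXEC);
            if (fd < 0)
                continue;
            printf("opened %s\n", devices[i]->nodes[j]);
            close(fd);
        }
    }
    drmFreeDevices(devices, count);
}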
|
| /third_party/python/Lib/lib2to3/fixes/ |
| H A D | fix_urllib.py | 160 nodes = [] 162 nodes.extend([new_node, Newline()]) 163 nodes.append(new_nodes[-1]) 164 node.replace(nodes)
|
| /third_party/skia/src/core/ |
| H A D | SkRTree.cpp | 56 // This function parallels bulkLoad, but just counts how many nodes bulkLoad would allocate. 72 int nodes = 0; in CountNodes() local 84 nodes++; in CountNodes() 90 return nodes + CountNodes(nodes); in CountNodes() 107 // If the remainder isn't enough to fill a node, we'll add fewer nodes to other branches. in bulkLoad() 119 // if need be, omit some nodes to make up for remainder in bulkLoad()
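Per the comment in the hits, CountNodes() parallels bulkLoad() and only counts how many nodes the bulk load would allocate. The arithmetic amounts to summing per-level ceilings: children are grouped at most `branches` at a time, level by level, until a single root remains. A simplified sketch of that count (it deliberately ignores the remainder rebalancing the other hits mention, and the function name is illustrative):

static int count_bulk_load_nodes(int leaves, int branches)
{
    int nodes = 0;

    while (leaves > 1) {
        int parents = (leaves + branches - 1) / branches; /* ceil */

        nodes += parents;
        leaves = parents;
    }
    return nodes;
}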
|
| /kernel/linux/linux-5.10/drivers/interconnect/qcom/ |
| H A D | sm8250.c | 220 .nodes = aggre1_noc_nodes, 250 .nodes = aggre2_noc_nodes, 267 .nodes = compute_noc_nodes, 333 .nodes = config_noc_nodes, 349 .nodes = dc_noc_nodes, 383 .nodes = gem_noc_nodes, 399 .nodes = ipa_virt_nodes, 416 .nodes = mc_virt_nodes, 446 .nodes = mmss_noc_nodes, 472 .nodes [all...] |
| H A D | sm8150.c | 206 .nodes = aggre1_noc_nodes, 241 .nodes = aggre2_noc_nodes, 259 .nodes = camnoc_virt_nodes, 276 .nodes = compute_noc_nodes, 344 .nodes = config_noc_nodes, 360 .nodes = dc_noc_nodes, 395 .nodes = gem_noc_nodes, 411 .nodes = ipa_virt_nodes, 428 .nodes = mc_virt_nodes, 458 .nodes [all...] |
| /kernel/linux/linux-5.10/lib/ |
| H A D | objagg.c | 506 * only roots and leafs nodes. Leaf nodes are called deltas. 507 * But in general, this can be easily extended for intermediate nodes. 509 * nodes. 705 struct objagg_tmp_node *nodes; member 737 struct objagg_tmp_node *node = &graph->nodes[index]; in objagg_tmp_graph_node_weight() 741 /* Node weight is sum of node users and all other nodes users in objagg_tmp_graph_node_weight() 748 node = &graph->nodes[j]; in objagg_tmp_graph_node_weight() 765 node = &graph->nodes[i]; in objagg_tmp_graph_node_max_weight() 791 graph->nodes in objagg_tmp_graph_create() [all...] |
| /kernel/linux/linux-6.6/lib/ |
| H A D | objagg.c | 506 * only roots and leafs nodes. Leaf nodes are called deltas. 507 * But in general, this can be easily extended for intermediate nodes. 509 * nodes. 705 struct objagg_tmp_node *nodes; member 737 struct objagg_tmp_node *node = &graph->nodes[index]; in objagg_tmp_graph_node_weight() 741 /* Node weight is sum of node users and all other nodes users in objagg_tmp_graph_node_weight() 748 node = &graph->nodes[j]; in objagg_tmp_graph_node_weight() 765 node = &graph->nodes[i]; in objagg_tmp_graph_node_max_weight() 790 graph->nodes in objagg_tmp_graph_create() [all...] |
| H A D | group_cpus.c | 88 int n, nodes = 0; in get_nodes_in_cpumask() local 90 /* Calculate the number of nodes in the supplied affinity mask */ in get_nodes_in_cpumask() 94 nodes++; in get_nodes_in_cpumask() 97 return nodes; in get_nodes_in_cpumask() 164 * bigger than number of active numa nodes. Always start the in alloc_nodes_groups() 170 * other nodes will be allocated >= 1 groups, since 'numgrps' is in alloc_nodes_groups() 171 * bigger than number of numa nodes. in alloc_nodes_groups() 176 * 1) suppose there are two nodes: A and B in alloc_nodes_groups() 223 * For nodes >= 3, it can be thought as one node and another big in alloc_nodes_groups() 254 unsigned int i, n, nodes, cpus_per_gr in __group_cpus_evenly() local [all...] |
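get_nodes_in_cpumask(), which appears both here and in the older kernel/irq/affinity.c below, simply walks every NUMA node and counts (and records in a nodemask) the ones whose CPUs intersect the supplied affinity mask. A sketch of that helper, assuming kernel context and the caller-provided per-node cpumask table used in the original:

static int count_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
                                  const struct cpumask *mask,
                                  nodemask_t *nodemsk)
{
    int n, nodes = 0;

    for_each_node(n) {
        if (cpumask_intersects(mask, node_to_cpumask[n])) {
            node_set(n, *nodemsk);
            nodes++;
        }
    }
    return nodes;
}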
| /kernel/linux/linux-6.6/fs/btrfs/ |
| H A D | send.c | 100 * maple tree's internal nodes, is 24K. 166 * don't operate on stale extent buffers for nodes (level >= 1) or on 946 ii = btrfs_item_ptr(path->nodes[0], path->slots[0], in get_inode_info() 948 info->size = btrfs_inode_size(path->nodes[0], ii); in get_inode_info() 949 info->gen = btrfs_inode_generation(path->nodes[0], ii); in get_inode_info() 950 info->mode = btrfs_inode_mode(path->nodes[0], ii); in get_inode_info() 951 info->uid = btrfs_inode_uid(path->nodes[0], ii); in get_inode_info() 952 info->gid = btrfs_inode_gid(path->nodes[0], ii); in get_inode_info() 953 info->rdev = btrfs_inode_rdev(path->nodes[0], ii); in get_inode_info() 954 info->nlink = btrfs_inode_nlink(path->nodes[ in get_inode_info() [all...] |
| H A D | free-space-tree.c | 87 leaf = path->nodes[0]; in add_new_free_space_info() 125 return btrfs_item_ptr(path->nodes[0], path->slots[0], in search_free_space_info() 242 leaf = path->nodes[0]; in convert_free_space_to_bitmaps() 285 leaf = path->nodes[0]; in convert_free_space_to_bitmaps() 323 leaf = path->nodes[0]; in convert_free_space_to_bitmaps() 379 leaf = path->nodes[0]; in convert_free_space_to_extents() 428 leaf = path->nodes[0]; in convert_free_space_to_extents() 493 flags = btrfs_free_space_flags(path->nodes[0], info); in update_free_space_extent_count() 494 extent_count = btrfs_free_space_extent_count(path->nodes[0], info); in update_free_space_extent_count() 497 btrfs_set_free_space_extent_count(path->nodes[ in update_free_space_extent_count() [all...] |
| /kernel/linux/linux-5.10/kernel/irq/ |
| H A D | affinity.c | 86 int n, nodes = 0; in get_nodes_in_cpumask() local 88 /* Calculate the number of nodes in the supplied affinity mask */ in get_nodes_in_cpumask() 92 nodes++; in get_nodes_in_cpumask() 95 return nodes; in get_nodes_in_cpumask() 162 * bigger than number of active numa nodes. Always start the in alloc_nodes_vectors() 168 * other nodes will be allocated >= 1 vector, since 'numvecs' is in alloc_nodes_vectors() 169 * bigger than number of numa nodes. in alloc_nodes_vectors() 174 * 1) suppose there are two nodes: A and B in alloc_nodes_vectors() 221 * For nodes >= 3, it can be thought as one node and another big in alloc_nodes_vectors() 255 unsigned int i, n, nodes, cpus_per_ve in __irq_build_affinity_masks() local [all...] |
| /kernel/linux/linux-6.6/tools/testing/selftests/kvm/x86_64/ |
| H A D | xapic_ipi_test.c | 17 * amongst the available numa nodes on the machine. 257 int nodes = 0; in do_migrations() local 270 /* Get set of first 64 numa nodes available */ in do_migrations() 275 fprintf(stderr, "Numa nodes found amongst first %lu possible nodes " in do_migrations() 280 * available node. migrate_pages called below requires specifying nodes in do_migrations() 285 nodemasks[nodes] = nodemask & bit; in do_migrations() 286 nodes++; in do_migrations() 290 TEST_ASSERT(nodes > 1, in do_migrations() 291 "Did not find at least 2 numa nodes in do_migrations() [all...] |
| /third_party/python/Lib/test/ |
| H A D | test_graphlib.py | 12 nodes = ts.get_ready() 13 for node in nodes: 15 yield tuple(sorted(nodes)) 205 nodes = ts.get_ready() 206 ts.done(*nodes) 207 yield set(nodes)
|
| /third_party/selinux/libsepol/cil/src/ |
| H A D | cil_symtab.c | 67 cil_list_init(&datum->nodes, CIL_LIST_ITEM); in cil_symtab_datum_init() 72 cil_list_destroy(&datum->nodes, 0); in cil_symtab_datum_destroy() 78 if (datum && datum->nodes != NULL) { in cil_symtab_datum_remove_node() 79 cil_list_remove(datum->nodes, CIL_NODE, node, 0); in cil_symtab_datum_remove_node() 80 if (datum->nodes->head == NULL) { in cil_symtab_datum_remove_node() 97 cil_list_append(datum->nodes, CIL_NODE, node); in cil_symtab_insert()
|
| /kernel/linux/linux-6.6/tools/perf/util/ |
| H A D | cputopo.h | 46 struct numa_topology_node nodes[]; member 56 struct hybrid_topology_node nodes[]; member
|
| /third_party/ltp/testcases/kernel/syscalls/move_pages/ |
| H A D | move_pages_support.h | 36 int alloc_pages_on_nodes(void **pages, unsigned int num, int *nodes); 41 unsigned int num, int *nodes);
|