/kernel/linux/linux-6.6/drivers/net/ethernet/engleder/
tsnep_selftests.c
    357  qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);   in tsnep_test_taprio()
    361  qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;   in tsnep_test_taprio()
    367  qopt->entries[0].gate_mask = 0x02;   in tsnep_test_taprio()
    368  qopt->entries[0].interval = 200000;   in tsnep_test_taprio()
    369  qopt->entries[1].gate_mask = 0x03;   in tsnep_test_taprio()
    370  qopt->entries[1].interval = 800000;   in tsnep_test_taprio()
    371  qopt->entries[2].gate_mask = 0x07;   in tsnep_test_taprio()
    372  qopt->entries[2].interval = 240000;   in tsnep_test_taprio()
    373  qopt->entries[3].gate_mask = 0x01;   in tsnep_test_taprio()
    374  qopt->entries[ ...   in tsnep_test_taprio()
    [all...]
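The tsnep selftest above uses the common kernel pattern of allocating a header plus a flexible array in one shot via struct_size(). A minimal userspace sketch of the same pattern, assuming a hypothetical stand-in struct (taprio_qopt, gate_entry) and a simplified STRUCT_SIZE macro in place of the kernel's overflow-checked struct_size():

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Hypothetical stand-in for tc_taprio_qopt_offload: header + gates. */
struct gate_entry { unsigned int gate_mask; unsigned int interval; };
struct taprio_qopt {
	size_t num_entries;
	struct gate_entry entries[];	/* flexible array member */
};

/* Userspace analogue of struct_size(): header + n trailing elements,
 * without the kernel macro's overflow checking. */
#define STRUCT_SIZE(ptr, member, n) \
	(offsetof(__typeof__(*(ptr)), member) + sizeof((ptr)->member[0]) * (n))

int main(void)
{
	struct taprio_qopt *qopt = NULL;
	size_t sz = STRUCT_SIZE(qopt, entries, 255);	/* only types used */

	qopt = calloc(1, sz);	/* one allocation covers header + 255 gates */
	if (!qopt)
		return 1;

	qopt->num_entries = 2;
	qopt->entries[0].gate_mask = 0x02;	/* queue 1 open */
	qopt->entries[0].interval  = 200000;	/* ns */
	qopt->entries[1].gate_mask = 0x03;	/* queues 0 and 1 open */
	qopt->entries[1].interval  = 800000;

	printf("allocated %zu bytes for 255 entries\n", sz);
	free(qopt);
	return 0;
}
```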
/kernel/linux/linux-6.6/lib/
stackdepot.c
    63   unsigned long entries[];  /* Variable-sized array of frames */   member
    126  unsigned long entries = 0;   in stack_depot_early_init() local
    145  * If stack_bucket_number_order is not set, leave entries as 0 to rely   in stack_depot_early_init()
    149  entries = 1UL << stack_bucket_number_order;   in stack_depot_early_init()
    153  entries,   in stack_depot_early_init()
    173  unsigned long entries;   in stack_depot_init() local
    186  entries = 1UL << stack_bucket_number_order;   in stack_depot_init()
    190  entries = nr_free_buffer_pages();   in stack_depot_init()
    191  entries = roundup_pow_of_two(entries);   in stack_depot_init()
    260  depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)   depot_alloc_stack() argument
    320  hash_stack(unsigned long *entries, unsigned int size)   hash_stack() argument
    343  find_stack(struct stack_record *bucket, unsigned long *entries, int size, u32 hash)   find_stack() argument
    358  __stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags, bool can_alloc)   __stack_depot_save() argument
    452  stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags)   stack_depot_save() argument
    460  stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries)   stack_depot_fetch() argument
    500  unsigned long *entries;   stack_depot_print() local
    512  unsigned long *entries;   stack_depot_snprint() local
    [all...]
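The hits above outline stackdepot's save path: hash the frame array (jhash2 in the kernel), look the trace up in a hash bucket (find_stack), and allocate and memcpy a new record only on a miss (depot_alloc_stack). A self-contained userspace sketch of that dedup scheme; hash_stack here is an FNV-1a stand-in for jhash2, and buckets/depot_save are hypothetical names, not the kernel API:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct stack_record {
	struct stack_record *next;	/* hash-bucket chain */
	uint32_t hash;
	unsigned int size;
	unsigned long entries[];	/* variable-sized array of frames */
};

#define DEPOT_BUCKETS 1024
static struct stack_record *buckets[DEPOT_BUCKETS];

static uint32_t hash_stack(const unsigned long *entries, unsigned int size)
{
	uint32_t h = 2166136261u;	/* FNV-1a stand-in for jhash2 */
	const unsigned char *p = (const unsigned char *)entries;

	for (size_t i = 0; i < size * sizeof(*entries); i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

/* Return the canonical record for a trace, deduplicating identical stacks. */
static struct stack_record *depot_save(const unsigned long *entries,
				       unsigned int size)
{
	uint32_t hash = hash_stack(entries, size);
	struct stack_record *r;

	for (r = buckets[hash % DEPOT_BUCKETS]; r; r = r->next)
		if (r->hash == hash && r->size == size &&
		    !memcmp(r->entries, entries, size * sizeof(*entries)))
			return r;	/* found: reuse the existing record */

	r = malloc(sizeof(*r) + size * sizeof(*entries));
	if (!r)
		return NULL;
	r->hash = hash;
	r->size = size;
	memcpy(r->entries, entries, size * sizeof(*entries));
	r->next = buckets[hash % DEPOT_BUCKETS];
	buckets[hash % DEPOT_BUCKETS] = r;
	return r;
}
```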
hashtable_test.c
    90   /* Both entries should have been visited exactly once. */   in hashtable_test_hash_add()
    125  struct hashtable_test_entry entries[3];   in hashtable_test_hash_for_each() local
    130  /* Add three entries to the hashtable. */   in hashtable_test_hash_for_each()
    132  entries[i].key = i;   in hashtable_test_hash_for_each()
    133  entries[i].data = i + 10;   in hashtable_test_hash_for_each()
    134  entries[i].visited = 0;   in hashtable_test_hash_for_each()
    135  hash_add(hash, &entries[i].node, entries[i].key);   in hashtable_test_hash_for_each()
    149  KUNIT_EXPECT_EQ(test, entries[j].visited, 1);   in hashtable_test_hash_for_each()
    154  struct hashtable_test_entry entries[ ...   in hashtable_test_hash_for_each_safe() local
    187  struct hashtable_test_entry entries[4];   hashtable_test_hash_for_each_possible() local
    241  struct hashtable_test_entry entries[4];   hashtable_test_hash_for_each_possible_safe() local
    [all...]
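The KUnit test fills a fixed array of entries, links each into the table with hash_add(), and then checks that hash_for_each() visits every entry exactly once. A minimal kernel-style sketch of that usage; test_entry, map, and demo are hypothetical names, and this compiles only in-tree:

```c
#include <linux/hashtable.h>
#include <linux/printk.h>

struct test_entry {
	int key;
	int data;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(map, 3);	/* 2^3 = 8 buckets */

static void demo(void)
{
	static struct test_entry entries[3];
	struct test_entry *cur;
	int i, bkt;

	for (i = 0; i < 3; i++) {
		entries[i].key = i;
		entries[i].data = i + 10;
		hash_add(map, &entries[i].node, entries[i].key);
	}

	/* Walks every bucket; visits each of the three entries once. */
	hash_for_each(map, bkt, cur, node)
		pr_info("key=%d data=%d\n", cur->key, cur->data);
}
```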
list-test.c
    387  struct list_head entries[3], *cur;   in list_test_list_cut_position() local
    392  list_add_tail(&entries[0], &list1);   in list_test_list_cut_position()
    393  list_add_tail(&entries[1], &list1);   in list_test_list_cut_position()
    394  list_add_tail(&entries[2], &list1);   in list_test_list_cut_position()
    396  /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */   in list_test_list_cut_position()
    397  list_cut_position(&list2, &list1, &entries[1]);   in list_test_list_cut_position()
    398  /* after: [list2] -> entries[0] -> entries[ ...   in list_test_list_cut_position()
    415  struct list_head entries[3], *cur;   list_test_list_cut_before() local
    443  struct list_head entries[5], *cur;   list_test_list_splice() local
    468  struct list_head entries[5], *cur;   list_test_list_splice_tail() local
    493  struct list_head entries[5], *cur;   list_test_list_splice_init() local
    520  struct list_head entries[5], *cur;   list_test_list_splice_tail_init() local
    643  struct list_head entries[3], *cur;   list_test_list_for_each() local
    661  struct list_head entries[3], *cur;   list_test_list_for_each_prev() local
    679  struct list_head entries[3], *cur, *n;   list_test_list_for_each_safe() local
    700  struct list_head entries[3], *cur, *n;   list_test_list_for_each_prev_safe() local
    720  struct list_test_struct entries[5], *cur;   list_test_list_for_each_entry() local
    741  struct list_test_struct entries[5], *cur;   list_test_list_for_each_entry_reverse() local
    1030  struct hlist_node entries[3], *cur;   hlist_test_for_each() local
    1049  struct hlist_node entries[3], *cur, *n;   hlist_test_for_each_safe() local
    1069  struct hlist_test_struct entries[5], *cur;   hlist_test_for_each_entry() local
    1092  struct hlist_test_struct entries[5], *cur;   hlist_test_for_each_entry_continue() local
    1123  struct hlist_test_struct entries[5], *cur;   hlist_test_for_each_entry_from() local
    1151  struct hlist_test_struct entries[5], *cur;   hlist_test_for_each_entry_safe() local
    [all...]
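As the test's own comments show, list_cut_position(&list2, &list1, &entries[1]) splits list1 at the pivot: everything up to and including entries[1] moves to list2, and list1 keeps the rest. A short kernel-style sketch of exactly that sequence (demo_cut is a hypothetical wrapper):

```c
#include <linux/list.h>

static void demo_cut(void)
{
	LIST_HEAD(list1);
	LIST_HEAD(list2);
	struct list_head entries[3];
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&entries[i], &list1);

	/* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
	list_cut_position(&list2, &list1, &entries[1]);
	/* after:  [list2] -> entries[0] -> entries[1]
	 *         [list1] -> entries[2]
	 * Everything up to and including the pivot moves to list2. */
}
```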
/kernel/linux/linux-5.10/lib/
list-test.c
    348  struct list_head entries[3], *cur;   in list_test_list_cut_position() local
    353  list_add_tail(&entries[0], &list1);   in list_test_list_cut_position()
    354  list_add_tail(&entries[1], &list1);   in list_test_list_cut_position()
    355  list_add_tail(&entries[2], &list1);   in list_test_list_cut_position()
    357  /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */   in list_test_list_cut_position()
    358  list_cut_position(&list2, &list1, &entries[1]);   in list_test_list_cut_position()
    359  /* after: [list2] -> entries[0] -> entries[ ...   in list_test_list_cut_position()
    376  struct list_head entries[3], *cur;   list_test_list_cut_before() local
    404  struct list_head entries[5], *cur;   list_test_list_splice() local
    429  struct list_head entries[5], *cur;   list_test_list_splice_tail() local
    454  struct list_head entries[5], *cur;   list_test_list_splice_init() local
    481  struct list_head entries[5], *cur;   list_test_list_splice_tail_init() local
    584  struct list_head entries[3], *cur;   list_test_list_for_each() local
    602  struct list_head entries[3], *cur;   list_test_list_for_each_prev() local
    620  struct list_head entries[3], *cur, *n;   list_test_list_for_each_safe() local
    641  struct list_head entries[3], *cur, *n;   list_test_list_for_each_prev_safe() local
    661  struct list_test_struct entries[5], *cur;   list_test_list_for_each_entry() local
    682  struct list_test_struct entries[5], *cur;   list_test_list_for_each_entry_reverse() local
    [all...]
stackdepot.c
    65   unsigned long entries[1];  /* Variable-sized array of entries. */   member
    104  static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,   in depot_alloc_stack() argument
    107  int required_size = offsetof(struct stack_record, entries) +   in depot_alloc_stack()
    139  memcpy(stack->entries, entries, size * sizeof(unsigned long));   in depot_alloc_stack()
    155  static inline u32 hash_stack(unsigned long *entries, unsigned int size)   in hash_stack() argument
    157  return jhash2((u32 *)entries,   in hash_stack()
    177  /* Find a stack that is equal to the one stored in entries in the hash */
    178  find_stack(struct stack_record *bucket, unsigned long *entries, int size, u32 hash)   find_stack() argument
    179  unsigned long *entries,   in find_stack()
    202  stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries)   stack_depot_fetch() argument
    235  stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags)   stack_depot_save() argument
    329  filter_irq_stacks(unsigned long *entries, unsigned int nr_entries)   filter_irq_stacks() argument
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
smu_helper.c
    224  vvalue = vol_table->entries[i].value;   in phm_trim_voltage_table()
    228  if (vvalue == table->entries[j].value) {   in phm_trim_voltage_table()
    235  table->entries[table->count].value = vvalue;   in phm_trim_voltage_table()
    236  table->entries[table->count].smio_low =   in phm_trim_voltage_table()
    237  vol_table->entries[i].smio_low;   in phm_trim_voltage_table()
    265  vol_table->entries[i].value = dep_table->entries[i].mvdd;   in phm_get_svi2_mvdd_voltage_table()
    266  vol_table->entries[i].smio_low = 0;   in phm_get_svi2_mvdd_voltage_table()
    293  vol_table->entries[i].value = dep_table->entries[ ...   in phm_get_svi2_vddci_voltage_table()
    [all...]
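phm_trim_voltage_table() deduplicates voltage values while preserving their order: each value is emitted only if it has not appeared before. A userspace sketch of the same order-preserving dedup, with a pared-down vol_entry (the real entries also carry smio_low):

```c
#include <stdio.h>

struct vol_entry { unsigned int value; };

/* Copy each value to out[] only if it has not been emitted already. */
static size_t trim_table(const struct vol_entry *in, size_t n,
			 struct vol_entry *out)
{
	size_t count = 0;

	for (size_t i = 0; i < n; i++) {
		int seen = 0;

		for (size_t j = 0; j < count; j++)
			if (in[i].value == out[j].value) {
				seen = 1;
				break;
			}
		if (!seen)
			out[count++].value = in[i].value;
	}
	return count;
}

int main(void)
{
	struct vol_entry in[] = { {900}, {950}, {900}, {1000}, {950} };
	struct vol_entry out[5];
	size_t n = trim_table(in, 5, out);

	for (size_t i = 0; i < n; i++)
		printf("%u\n", out[i].value);	/* 900 950 1000 */
	return 0;
}
```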
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/pm/powerplay/hwmgr/
smu_helper.c
    224  vvalue = vol_table->entries[i].value;   in phm_trim_voltage_table()
    228  if (vvalue == table->entries[j].value) {   in phm_trim_voltage_table()
    235  table->entries[table->count].value = vvalue;   in phm_trim_voltage_table()
    236  table->entries[table->count].smio_low =   in phm_trim_voltage_table()
    237  vol_table->entries[i].smio_low;   in phm_trim_voltage_table()
    265  vol_table->entries[i].value = dep_table->entries[i].mvdd;   in phm_get_svi2_mvdd_voltage_table()
    266  vol_table->entries[i].smio_low = 0;   in phm_get_svi2_mvdd_voltage_table()
    293  vol_table->entries[i].value = dep_table->entries[ ...   in phm_get_svi2_vddci_voltage_table()
    [all...]
/kernel/linux/linux-5.10/tools/lib/api/fd/
array.c
    15  fda->entries = NULL;   in fdarray__init()
    27  struct pollfd *entries = realloc(fda->entries, size);   in fdarray__grow() local
    29  if (entries == NULL)   in fdarray__grow()
    34  free(entries);   in fdarray__grow()
    38  memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);   in fdarray__grow()
    42  fda->entries = entries;   in fdarray__grow()
    65  free(fda->entries);   in fdarray__exit()
    84  fda->entries[fd ...   in fdarray__add()
    [all...]
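fdarray__grow() is the classic realloc-and-zero-the-tail growth pattern: realloc keeps the old pollfds, and only the newly added region is memset to zero. A simplified userspace sketch; the real function also grows a parallel priv array and frees entries if that second allocation fails:

```c
#include <poll.h>
#include <stdlib.h>
#include <string.h>

struct fdarray {
	int nr;			/* entries in use */
	int nr_alloc;		/* entries allocated */
	struct pollfd *entries;
};

static int fdarray_grow(struct fdarray *fda, int nr)
{
	size_t size = (fda->nr_alloc + nr) * sizeof(struct pollfd);
	struct pollfd *entries = realloc(fda->entries, size);

	if (entries == NULL)
		return -1;

	/* realloc preserved the old entries; zero only the new tail. */
	memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);

	fda->nr_alloc += nr;
	fda->entries = entries;
	return 0;
}
```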
/kernel/linux/linux-5.10/kernel/events/
callchain.c
    50   struct callchain_cpus_entries *entries;   in release_callchain_buffers_rcu() local
    53   entries = container_of(head, struct callchain_cpus_entries, rcu_head);   in release_callchain_buffers_rcu()
    56   kfree(entries->cpu_entries[cpu]);   in release_callchain_buffers_rcu()
    58   kfree(entries);   in release_callchain_buffers_rcu()
    63   struct callchain_cpus_entries *entries;   in release_callchain_buffers() local
    65   entries = callchain_cpus_entries;   in release_callchain_buffers()
    67   call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);   in release_callchain_buffers()
    74   struct callchain_cpus_entries *entries;   in alloc_callchain_buffers() local
    83   entries = kzalloc(size, GFP_KERNEL);   in alloc_callchain_buffers()
    84   if (!entries)   in alloc_callchain_buffers()
    155  struct callchain_cpus_entries *entries;   get_callchain_entry() local
    [all...]
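release_callchain_buffers() cannot free the per-CPU buffers immediately, since RCU readers may still hold pointers to them; it defers the free with call_rcu(), and the callback recovers the enclosing struct with container_of(). A kernel-style sketch of that deferred-free pattern; callchain_bufs and the function names here are hypothetical simplifications:

```c
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>

struct callchain_bufs {
	struct rcu_head rcu_head;
	void *cpu_entries[];		/* one buffer per possible CPU */
};

/* Runs only after every pre-existing RCU read-side critical section ends. */
static void release_bufs_rcu(struct rcu_head *head)
{
	struct callchain_bufs *bufs =
		container_of(head, struct callchain_bufs, rcu_head);
	int cpu;

	for_each_possible_cpu(cpu)
		kfree(bufs->cpu_entries[cpu]);
	kfree(bufs);
}

static void release_bufs(struct callchain_bufs *bufs)
{
	/* Defer the free; concurrent readers may still hold pointers. */
	call_rcu(&bufs->rcu_head, release_bufs_rcu);
}
```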
/kernel/linux/linux-6.6/kernel/events/
callchain.c
    50   struct callchain_cpus_entries *entries;   in release_callchain_buffers_rcu() local
    53   entries = container_of(head, struct callchain_cpus_entries, rcu_head);   in release_callchain_buffers_rcu()
    56   kfree(entries->cpu_entries[cpu]);   in release_callchain_buffers_rcu()
    58   kfree(entries);   in release_callchain_buffers_rcu()
    63   struct callchain_cpus_entries *entries;   in release_callchain_buffers() local
    65   entries = callchain_cpus_entries;   in release_callchain_buffers()
    67   call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);   in release_callchain_buffers()
    74   struct callchain_cpus_entries *entries;   in alloc_callchain_buffers() local
    83   entries = kzalloc(size, GFP_KERNEL);   in alloc_callchain_buffers()
    84   if (!entries)   in alloc_callchain_buffers()
    155  struct callchain_cpus_entries *entries;   get_callchain_entry() local
    [all...]
/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/
iommu_api.c
    34   u64 entries;  /* number of entries in hpas/hpages[] */   member
    57   unsigned long entries, unsigned long dev_hpa,   in mm_iommu_do_alloc()
    66   ret = account_locked_vm(mm, entries, true);   in mm_iommu_do_alloc()
    70   locked_entries = entries;   in mm_iommu_do_alloc()
    80   mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));   in mm_iommu_do_alloc()
    88   * we use @ua and @entries natural alignment to allow IOMMU pages   in mm_iommu_do_alloc()
    91   mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));   in mm_iommu_do_alloc()
    92   mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));   in mm_iommu_do_alloc()
    102  chunk = min(chunk, entries);   in mm_iommu_do_alloc()
    56   mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)   mm_iommu_do_alloc() argument
    186  mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem)   mm_iommu_new() argument
    194  mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)   mm_iommu_newdev() argument
    308  mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries)   mm_iommu_get() argument
    [all...]
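mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)) picks the largest IOMMU page size that both the user address and the region size are aligned to: the lowest set bit of the OR of the two is the binding alignment. A userspace sketch using __builtin_ctzl() in place of the kernel's __ffs() (region_pageshift is a hypothetical helper; the argument must be nonzero):

```c
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int region_pageshift(unsigned long ua, unsigned long entries)
{
	/* Lowest set bit of (address | size-in-bytes) = shared alignment. */
	return __builtin_ctzl(ua | (entries << PAGE_SHIFT));
}

int main(void)
{
	/* 16 MB-aligned address, 4096 pages (16 MB): 24-bit alignment. */
	printf("%u\n", region_pageshift(0x1000000UL, 4096));	/* 24 */
	/* Same size, but only a 64 KB-aligned address: limited to 16. */
	printf("%u\n", region_pageshift(0x10000UL, 4096));	/* 16 */
	return 0;
}
```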
/kernel/linux/linux-6.6/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp_eth.c
    281  union eth_table_entry *entries;   in __nfp_eth_read_ports() local
    285  entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);   in __nfp_eth_read_ports()
    286  if (!entries)   in __nfp_eth_read_ports()
    289  ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);   in __nfp_eth_read_ports()
    296  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)   in __nfp_eth_read_ports()
    304  nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",   in __nfp_eth_read_ports()
    315  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)   in __nfp_eth_read_ports()
    316  nfp_eth_port_translate(nsp, &entries[i], i,   in __nfp_eth_read_ports()
    325  kfree(entries);   in __nfp_eth_read_ports()
    330  kfree(entries);   in __nfp_eth_read_ports()
    336  union eth_table_entry *entries;   nfp_eth_config_start() local
    373  union eth_table_entry *entries = nfp_nsp_config_entries(nsp);   nfp_eth_config_cleanup_end() local
    397  union eth_table_entry *entries = nfp_nsp_config_entries(nsp);   nfp_eth_config_commit_end() local
    426  union eth_table_entry *entries;   nfp_eth_set_mod_enable() local
    465  union eth_table_entry *entries;   nfp_eth_set_configured() local
    502  union eth_table_entry *entries = nfp_nsp_config_entries(nsp);   nfp_eth_set_bit_config() local
    533  union eth_table_entry *entries;   nfp_eth_set_idmode() local
    [all...]
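__nfp_eth_read_ports() counts the populated table slots (those with lanes bits set) and sanity-checks the result against the count the firmware reported. A simplified sketch of that cross-check; the mask value, table size, and eth_entry layout here are assumptions, not the real NSP wire format:

```c
#include <stdio.h>

#define NSP_ETH_PORT_LANES_MASK 0xfUL	/* hypothetical mask value */
#define NSP_ETH_MAX_COUNT 24		/* hypothetical table size */

struct eth_entry { unsigned long port; };

/* Return the populated-slot count, or -1 on a firmware mismatch. */
static int count_ports(const struct eth_entry *entries, int reported)
{
	int cnt = 0;

	for (int i = 0; i < NSP_ETH_MAX_COUNT; i++)
		if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
			cnt++;

	if (cnt != reported) {
		fprintf(stderr,
			"table entry count reported (%d) does not match entries present (%d)\n",
			reported, cnt);
		return -1;
	}
	return cnt;
}
```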
/kernel/linux/linux-6.6/tools/lib/api/fd/
array.c
    15  fda->entries = NULL;   in fdarray__init()
    27  struct pollfd *entries = realloc(fda->entries, size);   in fdarray__grow() local
    29  if (entries == NULL)   in fdarray__grow()
    34  free(entries);   in fdarray__grow()
    38  memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);   in fdarray__grow()
    42  fda->entries = entries;   in fdarray__grow()
    65  free(fda->entries);   in fdarray__exit()
    84  fda->entries[fd ...   in fdarray__add()
    [all...]
/kernel/linux/linux-6.6/tools/perf/util/
arm64-frame-pointer-unwind-support.c
    12  struct entries {   struct
    25  struct entries *entries = arg;   in add_entry() local
    27  entries->stack[entries->length++] = entry->ip;   in add_entry()
    34  struct entries entries = {};   in get_leaf_frame_caller_aarch64() local
    56  ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);   in get_leaf_frame_caller_aarch64()
    59  if (ret || entries.length != 2)   in get_leaf_frame_caller_aarch64()
    62  return callchain_param.order == ORDER_CALLER ? entries ...   in get_leaf_frame_caller_aarch64()
    [all...]
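get_leaf_frame_caller_aarch64() passes a small accumulator struct to the unwinder, whose add_entry() callback appends each frame's instruction pointer; the caller then requires exactly two collected entries. A stripped-down sketch of that callback/accumulator pair, with types simplified from the perf sources:

```c
#include <stdint.h>

/* Accumulator: the unwinder is asked for at most two frames. */
struct entries {
	uint64_t stack[2];
	unsigned int length;
};

struct unwind_entry { uint64_t ip; };	/* simplified stand-in */

/* Invoked once per unwound frame; appends the IP to the accumulator. */
static int add_entry(struct unwind_entry *entry, void *arg)
{
	struct entries *entries = arg;

	entries->stack[entries->length++] = entry->ip;
	return 0;
}
```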
mem2node.c
    50   struct phys_entry *entries, *tmp_entries;   in mem2node__init() local
    62   entries = zalloc(sizeof(*entries) * max);   in mem2node__init()
    63   if (!entries)   in mem2node__init()
    84   struct phys_entry *prev = &entries[j - 1];   in mem2node__init()
    93   phys_entry__init(&entries[j++], start, bsize, n->node);   in mem2node__init()
    97   /* Cut unused entries, due to merging. */   in mem2node__init()
    98   tmp_entries = realloc(entries, sizeof(*entries) * j);   in mem2node__init()
    101  entries ...   in mem2node__init()
    [all...]
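mem2node__init() merges each range into its predecessor when the two are physically contiguous and on the same node, then shrinks the array with realloc() to cut the now-unused slots. A userspace sketch of that merge-then-trim step; merge_and_trim is a hypothetical helper and assumes n > 0:

```c
#include <stdlib.h>

struct phys_entry {
	unsigned long start, size;
	int node;
};

static struct phys_entry *merge_and_trim(struct phys_entry *entries,
					 size_t n, size_t *out_n)
{
	size_t j = 0;

	for (size_t i = 0; i < n; i++) {
		if (j > 0) {
			struct phys_entry *prev = &entries[j - 1];

			/* Contiguous and same node: extend, don't copy. */
			if (prev->node == entries[i].node &&
			    prev->start + prev->size == entries[i].start) {
				prev->size += entries[i].size;
				continue;
			}
		}
		entries[j++] = entries[i];
	}

	/* Cut unused entries, due to merging. */
	struct phys_entry *tmp = realloc(entries, j * sizeof(*entries));

	*out_n = j;
	return tmp ? tmp : entries;	/* keep old block if realloc fails */
}
```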
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/
dcn314_clk_mgr.c
    359  .entries = {
    396  .entries = {
    443  /* skip empty entries, the smu array has no holes*/   in dcn314_build_watermark_ranges()
    444  if (!bw_params->wm_table.entries[i].valid)   in dcn314_build_watermark_ranges()
    447  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;   in dcn314_build_watermark_ranges()
    448  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;   in dcn314_build_watermark_ranges()
    459  bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;   in dcn314_build_watermark_ranges()
    462  bw_params->clk_table.entries[i].dcfclk_mhz;   in dcn314_build_watermark_ranges()
    572  struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];   in dcn314_clk_mgr_helper_populate_bw_params()
    594  /* Invalid number of entries i ...   in dcn314_clk_mgr_helper_populate_bw_params()
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/
dcn315_clk_mgr.c
    248  .entries = {
    296  .entries = {
    333  .entries = {
    380  /* skip empty entries, the smu array has no holes*/   in dcn315_build_watermark_ranges()
    381  if (!bw_params->wm_table.entries[i].valid)   in dcn315_build_watermark_ranges()
    384  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;   in dcn315_build_watermark_ranges()
    385  table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;   in dcn315_build_watermark_ranges()
    396  bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;   in dcn315_build_watermark_ranges()
    399  bw_params->clk_table.entries[i].dcfclk_mhz;   in dcn315_build_watermark_ranges()
    479  struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_param ...   in dcn315_clk_mgr_helper_populate_bw_params()
    [all...]
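Both dcn314 and dcn315 build their watermark tables the same way: skip invalid wm_table entries, and give each valid set a DCFCLK span running from just above the previous ceiling up to its own clock. A simplified sketch of that loop; wm_entry/wm_range are pared-down stand-ins, and the real code takes the ceilings from clk_table rather than the previous range:

```c
#include <stdio.h>

struct wm_entry { int valid; unsigned int dcfclk_mhz; };
struct wm_range { unsigned int min_mhz, max_mhz; };

/* Build contiguous DCFCLK ranges from the valid entries only. */
static int build_ranges(const struct wm_entry *entries, int n,
			struct wm_range *out)
{
	int num_valid_sets = 0;

	for (int i = 0; i < n; i++) {
		/* skip empty entries, the output array has no holes */
		if (!entries[i].valid)
			continue;

		out[num_valid_sets].min_mhz = num_valid_sets == 0 ?
			0 : out[num_valid_sets - 1].max_mhz + 1;
		out[num_valid_sets].max_mhz = entries[i].dcfclk_mhz;
		num_valid_sets++;
	}
	return num_valid_sets;
}

int main(void)
{
	struct wm_entry wm[] = { {1, 400}, {0, 0}, {1, 600}, {1, 900} };
	struct wm_range out[4];
	int n = build_ranges(wm, 4, out);

	for (int i = 0; i < n; i++)
		printf("[%u..%u]\n", out[i].min_mhz, out[i].max_mhz);
	return 0;	/* [0..400] [401..600] [601..900] */
}
```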
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp_eth.c
    247  union eth_table_entry *entries;   in __nfp_eth_read_ports() local
    251  entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);   in __nfp_eth_read_ports()
    252  if (!entries)   in __nfp_eth_read_ports()
    255  ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);   in __nfp_eth_read_ports()
    262  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)   in __nfp_eth_read_ports()
    270  nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n",   in __nfp_eth_read_ports()
    281  if (entries[i].port & NSP_ETH_PORT_LANES_MASK)   in __nfp_eth_read_ports()
    282  nfp_eth_port_translate(nsp, &entries[i], i,   in __nfp_eth_read_ports()
    289  kfree(entries);   in __nfp_eth_read_ports()
    294  kfree(entries);   in __nfp_eth_read_ports()
    300  union eth_table_entry *entries;   nfp_eth_config_start() local
    337  union eth_table_entry *entries = nfp_nsp_config_entries(nsp);   nfp_eth_config_cleanup_end() local
    361  union eth_table_entry *entries = nfp_nsp_config_entries(nsp);   nfp_eth_config_commit_end() local
    390  union eth_table_entry *entries;   nfp_eth_set_mod_enable() local
    429  union eth_table_entry *entries;   nfp_eth_set_configured() local
    466  union eth_table_entry *entries = nfp_nsp_config_entries(nsp);   nfp_eth_set_bit_config() local
    [all...]
/kernel/linux/linux-5.10/tools/perf/util/
mem2node.c
    50   struct phys_entry *entries, *tmp_entries;   in mem2node__init() local
    62   entries = zalloc(sizeof(*entries) * max);   in mem2node__init()
    63   if (!entries)   in mem2node__init()
    84   struct phys_entry *prev = &entries[j - 1];   in mem2node__init()
    93   phys_entry__init(&entries[j++], start, bsize, n->node);   in mem2node__init()
    97   /* Cut unused entries, due to merging. */   in mem2node__init()
    98   tmp_entries = realloc(entries, sizeof(*entries) * j);   in mem2node__init()
    100  entries ...   in mem2node__init()
    [all...]
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/
iommu_api.c
    34   u64 entries;  /* number of entries in hpas/hpages[] */   member
    57   unsigned long entries, unsigned long dev_hpa,   in mm_iommu_do_alloc()
    66   ret = account_locked_vm(mm, entries, true);   in mm_iommu_do_alloc()
    70   locked_entries = entries;   in mm_iommu_do_alloc()
    80   mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));   in mm_iommu_do_alloc()
    88   * we use @ua and @entries natural alignment to allow IOMMU pages   in mm_iommu_do_alloc()
    91   mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));   in mm_iommu_do_alloc()
    92   mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));   in mm_iommu_do_alloc()
    102  chunk = min(chunk, entries);   in mm_iommu_do_alloc()
    56   mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)   mm_iommu_do_alloc() argument
    185  mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, struct mm_iommu_table_group_mem_t **pmem)   mm_iommu_new() argument
    193  mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, unsigned long entries, unsigned long dev_hpa, struct mm_iommu_table_group_mem_t **pmem)   mm_iommu_newdev() argument
    323  mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries)   mm_iommu_get() argument
    [all...]
/kernel/linux/linux-5.10/drivers/net/dsa/sja1105/
sja1105_vl.c
    27   if (list_empty(&gating_cfg->entries)) {   in sja1105_insert_gate_entry()
    28   list_add(&e->list, &gating_cfg->entries);   in sja1105_insert_gate_entry()
    32   list_for_each_entry(p, &gating_cfg->entries, list) {   in sja1105_insert_gate_entry()
    54   /* The gate entries contain absolute times in their e->interval field. Convert
    65   list_for_each_entry(e, &gating_cfg->entries, list) {   in sja1105_gating_cfg_time_to_interval()
    70   if (prev == &gating_cfg->entries)   in sja1105_gating_cfg_time_to_interval()
    76   last_e = list_last_entry(&gating_cfg->entries,   in sja1105_gating_cfg_time_to_interval()
    85   list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {   in sja1105_free_gating_config()
    144  u8 gate_state = rule->vl.entries[i].gate_state;   in sja1105_compose_gating_subschedule()
    157  time += rule->vl.entries[ ...   in sja1105_compose_gating_subschedule()
    567  sja1105_vl_gate(struct sja1105_private *priv, int port, struct netlink_ext_ack *extack, unsigned long cookie, struct sja1105_key *key, u32 index, s32 prio, u64 base_time, u64 cycle_time, u64 cycle_time_ext, u32 num_entries, struct action_gate_entry *entries)   sja1105_vl_gate() argument
    [all...]
/kernel/linux/linux-6.6/drivers/net/dsa/sja1105/
sja1105_vl.c
    27   if (list_empty(&gating_cfg->entries)) {   in sja1105_insert_gate_entry()
    28   list_add(&e->list, &gating_cfg->entries);   in sja1105_insert_gate_entry()
    32   list_for_each_entry(p, &gating_cfg->entries, list) {   in sja1105_insert_gate_entry()
    54   /* The gate entries contain absolute times in their e->interval field. Convert
    65   list_for_each_entry(e, &gating_cfg->entries, list) {   in sja1105_gating_cfg_time_to_interval()
    70   if (prev == &gating_cfg->entries)   in sja1105_gating_cfg_time_to_interval()
    76   last_e = list_last_entry(&gating_cfg->entries,   in sja1105_gating_cfg_time_to_interval()
    85   list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {   in sja1105_free_gating_config()
    144  u8 gate_state = rule->vl.entries[i].gate_state;   in sja1105_compose_gating_subschedule()
    157  time += rule->vl.entries[ ...   in sja1105_compose_gating_subschedule()
    581  sja1105_vl_gate(struct sja1105_private *priv, int port, struct netlink_ext_ack *extack, unsigned long cookie, struct sja1105_key *key, u32 index, s32 prio, u64 base_time, u64 cycle_time, u64 cycle_time_ext, u32 num_entries, struct action_gate_entry *entries)   sja1105_vl_gate() argument
    [all...]
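sja1105_insert_gate_entry() keeps the gating list sorted by interval: walk the list, reject duplicate time slots, and link the new entry before the first larger element. A kernel-style sketch of that sorted insertion with a hypothetical gate_entry type; it relies on the fact that list_add_tail() on a member node inserts directly before that node:

```c
#include <linux/list.h>
#include <linux/errno.h>

struct gate_entry {
	unsigned long long interval;
	struct list_head list;
};

static int insert_sorted(struct list_head *head, struct gate_entry *e)
{
	struct gate_entry *p;

	list_for_each_entry(p, head, list) {
		if (p->interval == e->interval)
			return -EBUSY;		/* duplicate time slot */
		if (e->interval < p->interval) {
			/* Link e right before p, keeping the list sorted. */
			list_add_tail(&e->list, &p->list);
			return 0;
		}
	}
	list_add_tail(&e->list, head);	/* largest so far: append */
	return 0;
}
```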
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/dml/dcn321/
dcn321_fpu.c
    260  * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
    294  * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
    295  * and remove entries that do not follow this order
    365  if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)   in build_synthetic_soc_states()
    366  max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;   in build_synthetic_soc_states()
    367  if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)   in build_synthetic_soc_states()
    368  max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;   in build_synthetic_soc_states()
    369  if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)   in build_synthetic_soc_states()
    370  max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;   in build_synthetic_soc_states()
    371  if (bw_params->clk_table.entries[ ...   in build_synthetic_soc_states()
    [all...]
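Per its kerneldoc, remove_inconsistent_entries() ensures that entries sharing a bandwidth value have MEMCLK and FCLK monotonically increasing, and removes entries that break the order. A compacting-filter sketch of that rule; soc_state is a pared-down stand-in, and the comparison is paraphrased from the comment rather than the real implementation:

```c
#include <stddef.h>

struct soc_state { unsigned int bw, memclk_mhz, fclk_mhz; };

/* Keep only entries that preserve MEMCLK/FCLK monotonicity within runs
 * of equal bandwidth; returns the new (possibly smaller) count. */
static size_t remove_inconsistent(struct soc_state *s, size_t n)
{
	size_t j = 0;

	for (size_t i = 0; i < n; i++) {
		if (j > 0 && s[i].bw == s[j - 1].bw &&
		    (s[i].memclk_mhz < s[j - 1].memclk_mhz ||
		     s[i].fclk_mhz < s[j - 1].fclk_mhz))
			continue;	/* out of order: drop this entry */
		s[j++] = s[i];
	}
	return j;
}
```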
/kernel/linux/linux-6.6/fs/nfs_common/
nfsacl.c
    13   * four instead of three entries.
    16   * the ACL_MASK and ACL_GROUP_OBJ entries may differ.)
    18   * entries contain the identifiers of the owner and owning group.
    20   * - ACL entries in the kernel are kept sorted in ascending order
    95   int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;   in nfsacl_encode() local
    99   .array_len = encode_entries ? entries : 0,   in nfsacl_encode()
    110  if (entries > NFS_ACL_MAX_ENTRIES ||   in nfsacl_encode()
    111  xdr_encode_word(buf, base, entries))   in nfsacl_encode()
    122  /* Insert entries in canonical order: other orders seem   in nfsacl_encode()
    157  u32 entries ...   in nfs_stream_encode_acl() local
    345  u32 entries;   nfsacl_decode() local
    394  u32 entries;   nfs_stream_decode_acl() local
    [all...]
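nfsacl_encode() sizes the on-the-wire ACL as max(a_count, 4) whenever any entries exist, because an ACL_MASK entry may have to be inserted to reach the canonical four-entry form, and it rejects ACLs beyond NFS_ACL_MAX_ENTRIES. A tiny sketch of just that count logic; nfsacl_entry_count is a hypothetical helper and does no XDR encoding:

```c
#include <stdio.h>

#define NFS_ACL_MAX_ENTRIES 1024

/* Wire entry count: 0 for an empty ACL, else at least the canonical 4. */
static int nfsacl_entry_count(int a_count)
{
	int entries = a_count ? (a_count > 4 ? a_count : 4) : 0;

	if (entries > NFS_ACL_MAX_ENTRIES)
		return -1;	/* too large to encode */
	return entries;
}

int main(void)
{
	printf("%d %d %d\n", nfsacl_entry_count(0),
	       nfsacl_entry_count(3), nfsacl_entry_count(7));	/* 0 4 7 */
	return 0;
}
```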