/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/

pio.c
  333: count = INIT_SC_PER_VL * num_vls;  in init_sc_pools_and_sizes()
  1814: if (unlikely(vl >= num_vls)) {  in pio_select_send_context_vl()
  1891: * @num_vls: number of vls
  1906: * If either the num_vls or num_send_contexts are non-power of 2, the
  1915: int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)  in pio_map_init() (argument)
  1929: sc_per_vl = num_kernel_send_contexts / num_vls;  in pio_map_init()
  1931: extra = num_kernel_send_contexts % num_vls;  in pio_map_init()
  1934: for (i = num_vls - 1; i >= 0; i--, extra--)  in pio_map_init()
  1939: roundup_pow_of_two(num_vls) *  in pio_map_init()
  1944: newmap->actual_vls = num_vls;  in pio_map_init()
  [all...]

sdma.c
  794: if (vl >= num_vls) {  in sdma_select_engine_vl()
  1178: * @num_vls: number of vls
  1192: * If either the num_vls or num_sdma are non-power of 2, the array sizes
  1201: int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)  in sdma_map_init() (argument)
  1214: sde_per_vl = dd->num_sdma / num_vls;  in sdma_map_init()
  1216: extra = dd->num_sdma % num_vls;  in sdma_map_init()
  1219: for (i = num_vls - 1; i >= 0; i--, extra--)  in sdma_map_init()
  1225: roundup_pow_of_two(num_vls) *  in sdma_map_init()
  1230: newmap->actual_vls = num_vls;  in sdma_map_init()
  1231: newmap->vls = roundup_pow_of_two(num_vls);  in sdma_map_init()
  [all...]

pio.h
  176: * a computation based on num_kernel_send_contexts/num_vls:
  179: * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls
  183: * In the case where there are num_kernel_send_contexts/num_vls doesn't divide
  269: int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,

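pio_map_init() and sdma_map_init() above share the same per-VL distribution arithmetic: integer-divide the available send contexts or SDMA engines by num_vls, hand the remainder out one at a time starting from the highest VL, and size the lookup map to roundup_pow_of_two(num_vls). Below is a minimal user-space sketch of that arithmetic only, not driver code; the helper and variable names are illustrative.

#include <stdio.h>

/* Illustrative stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int rup_pow2(unsigned int x)
{
    unsigned int p = 1;

    while (p < x)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned int num_engines = 16;  /* e.g. dd->num_sdma */
    unsigned int num_vls = 5;       /* deliberately not a power of two */
    unsigned int per_vl = num_engines / num_vls;
    int extra = (int)(num_engines % num_vls);
    unsigned int map_slots = rup_pow2(num_vls);
    int i;

    /*
     * Extras are handed out from the last VL downward, mirroring the
     * "for (i = num_vls - 1; i >= 0; i--, extra--)" loops above.
     */
    for (i = (int)num_vls - 1; i >= 0; i--, extra--)
        printf("VL%d gets %u engines\n", i, per_vl + (extra > 0 ? 1 : 0));

    printf("map sized for %u slots (roundup_pow_of_two(%u))\n",
           map_slots, num_vls);
    return 0;
}

Rounding the map up to a power of two presumably lets a VL be turned into a slot index with a cheap mask rather than a divide, which is why the non-power-of-2 cases described in pio.h and sdma.h are rounded up.
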
mad.c
  2736: u8 num_vls = hweight64(vl_select_mask);  in pma_get_opa_portstatus() (local)
  2745: response_data_size = struct_size(rsp, vls, num_vls);  in pma_get_opa_portstatus()
  2752: num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {  in pma_get_opa_portstatus()
  2987: u8 lq, num_vls;  in pma_get_opa_datacounters() (local)
  2998: num_vls = hweight32(be32_to_cpu(req->vl_select_mask));  in pma_get_opa_datacounters()
  3011: response_data_size = struct_size(req, port[0].vls, num_vls);  in pma_get_opa_datacounters()
  3206: u8 num_vls;  in pma_get_opa_porterrors() (local)
  3220: num_vls = hweight32(be32_to_cpu(req->vl_select_mask));  in pma_get_opa_porterrors()
  3227: response_data_size = struct_size(req, port[0].vls, num_vls);  in pma_get_opa_porterrors()

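In the mad.c hits above, num_vls is just the population count of the request's VL select mask (hweight64/hweight32), and it then sizes the trailing per-VL array of the response through struct_size(). A standalone sketch of that sizing pattern, with __builtin_popcountll and plain sizeof arithmetic standing in for the kernel helpers; the structs below are invented for illustration and are not the OPA wire format.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical response layout with a per-VL trailing array. */
struct vl_counters {
    uint64_t xmit_data;
    uint64_t rcv_data;
};

struct port_status_rsp {
    uint32_t port_num;
    struct vl_counters vls[];   /* one entry per selected VL */
};

int main(void)
{
    uint64_t vl_select_mask = 0x80ffULL;    /* e.g. VL0-VL7 plus VL15 */
    unsigned int num_vls = (unsigned int)__builtin_popcountll(vl_select_mask);

    /*
     * Equivalent of struct_size(rsp, vls, num_vls): header size plus
     * num_vls trailing elements (without the kernel helper's overflow
     * saturation).
     */
    size_t response_data_size =
        sizeof(struct port_status_rsp) + num_vls * sizeof(struct vl_counters);

    printf("num_vls = %u, response size = %zu bytes\n",
           num_vls, response_data_size);
    return 0;
}
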
sdma.h
  961: * a computation based on num_sdma/num_vls:
  964: * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
  968: * In the case where there are num_sdma/num_vls doesn't divide
  1056: u8 num_vls,

chip.c
  71: uint num_vls = HFI1_MAX_VLS_SUPPORTED;  (variable)
  72: module_param(num_vls, uint, S_IRUGO);
  73: MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  5840: for (i = 0; i < num_vls; i++)  in sc_to_vl()
  13431: if (num_kernel_contexts > (send_contexts - num_vls - 1)) {  in set_up_context_variables()
  13434: send_contexts - num_vls - 1,  in set_up_context_variables()
  13436: num_kernel_contexts = send_contexts - num_vls - 1;  in set_up_context_variables()
  14358: num_vls == 1 ||  in qos_rmt_entries()
  14363: for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)  in qos_rmt_entries()
  14371: n = ilog2(__roundup_pow_of_two(num_vls));  in qos_rmt_entries()
  [all...]

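The qos_rmt_entries() hit computes n = ilog2(__roundup_pow_of_two(num_vls)), i.e. the number of bits needed to encode a VL index. A quick standalone check of that identity, using a loop instead of the kernel's ilog2() and __roundup_pow_of_two():

#include <stdio.h>

/*
 * Bits needed to index 'count' values, i.e. ilog2(roundup_pow_of_two(count)):
 * 1 -> 0, 2 -> 1, 3..4 -> 2, 5..8 -> 3.
 */
static unsigned int bits_for(unsigned int count)
{
    unsigned int bits = 0;

    while ((1u << bits) < count)
        bits++;
    return bits;
}

int main(void)
{
    unsigned int num_vls;

    for (num_vls = 1; num_vls <= 8; num_vls++)
        printf("num_vls=%u needs %u index bit(s)\n", num_vls, bits_for(num_vls));
    return 0;
}
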
verbs.c
  1545: if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)  in hfi1_check_ah()
  1572: if (ah->vl < num_vls || ah->vl == 15)  in hfi1_notify_new_ah()

chip.h
  830: extern uint num_vls;

/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/

pio.c
  291: count = INIT_SC_PER_VL * num_vls;  in init_sc_pools_and_sizes()
  1772: if (unlikely(vl >= num_vls)) {  in pio_select_send_context_vl()
  1849: * @num_vls: number of vls
  1864: * If either the num_vls or num_send_contexts are non-power of 2, the
  1873: int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)  in pio_map_init() (argument)
  1887: sc_per_vl = num_kernel_send_contexts / num_vls;  in pio_map_init()
  1889: extra = num_kernel_send_contexts % num_vls;  in pio_map_init()
  1892: for (i = num_vls - 1; i >= 0; i--, extra--)  in pio_map_init()
  1896: newmap = kzalloc(struct_size(newmap, map, roundup_pow_of_two(num_vls)),  in pio_map_init()
  1900: newmap->actual_vls = num_vls;  in pio_map_init()
  [all...]

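Relative to the 5.10 pio.c hit, line 1896 here sizes the map allocation with struct_size(newmap, map, roundup_pow_of_two(num_vls)) rather than an open-coded multiplication. A rough user-space equivalent of what that computes is sketched below; the struct is hypothetical (the driver's real map type has different members) and the macro omits the overflow saturation the kernel helper provides.

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

/* Hypothetical map header with a trailing flexible array. */
struct vl_map {
    unsigned int actual_vls;
    unsigned int vls;       /* rounded up to a power of two */
    void *map[];            /* one slot per (rounded) VL */
};

/*
 * Rough user-space equivalent of struct_size(m, map, n): offset of the
 * flexible array plus n elements, with no overflow checking.
 */
#define STRUCT_SIZE(type, member, n) \
    (offsetof(type, member) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
    unsigned int num_vls = 5;
    unsigned int slots = 8;     /* roundup_pow_of_two(5) */
    size_t sz = STRUCT_SIZE(struct vl_map, map, slots);
    struct vl_map *m = calloc(1, sz);   /* stands in for kzalloc() */

    if (!m)
        return 1;
    m->actual_vls = num_vls;
    m->vls = slots;
    printf("allocated %zu bytes for %u map slots\n", sz, m->vls);
    free(m);
    return 0;
}
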
sdma.c
  752: if (vl >= num_vls) {  in sdma_select_engine_vl()
  1136: * @num_vls: number of vls
  1150: * If either the num_vls or num_sdma are non-power of 2, the array sizes
  1159: int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)  in sdma_map_init() (argument)
  1172: sde_per_vl = dd->num_sdma / num_vls;  in sdma_map_init()
  1174: extra = dd->num_sdma % num_vls;  in sdma_map_init()
  1177: for (i = num_vls - 1; i >= 0; i--, extra--)  in sdma_map_init()
  1183: roundup_pow_of_two(num_vls) *  in sdma_map_init()
  1188: newmap->actual_vls = num_vls;  in sdma_map_init()
  1189: newmap->vls = roundup_pow_of_two(num_vls);  in sdma_map_init()
  [all...]

pio.h
  134: * a computation based on num_kernel_send_contexts/num_vls:
  137: * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls
  141: * In the case where there are num_kernel_send_contexts/num_vls doesn't divide
  227: int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,

mad.c
  2694: u8 num_vls = hweight64(vl_select_mask);  in pma_get_opa_portstatus() (local)
  2703: response_data_size = struct_size(rsp, vls, num_vls);  in pma_get_opa_portstatus()
  2710: num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {  in pma_get_opa_portstatus()
  2945: u8 lq, num_vls;  in pma_get_opa_datacounters() (local)
  2956: num_vls = hweight32(be32_to_cpu(req->vl_select_mask));  in pma_get_opa_datacounters()
  2969: response_data_size = struct_size(req, port.vls, num_vls);  in pma_get_opa_datacounters()
  3164: u8 num_vls;  in pma_get_opa_porterrors() (local)
  3178: num_vls = hweight32(be32_to_cpu(req->vl_select_mask));  in pma_get_opa_porterrors()
  3185: response_data_size = struct_size(req, port.vls, num_vls);  in pma_get_opa_porterrors()

sdma.h
  902: * a computation based on num_sdma/num_vls:
  905: * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
  909: * In the case where there are num_sdma/num_vls doesn't divide
  997: u8 num_vls,

chip.c
  30: uint num_vls = HFI1_MAX_VLS_SUPPORTED;  (variable)
  31: module_param(num_vls, uint, S_IRUGO);
  32: MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  5800: for (i = 0; i < num_vls; i++)  in sc_to_vl()
  13392: if (num_kernel_contexts > (send_contexts - num_vls - 1)) {  in set_up_context_variables()
  13395: send_contexts - num_vls - 1,  in set_up_context_variables()
  13397: num_kernel_contexts = send_contexts - num_vls - 1;  in set_up_context_variables()
  14319: num_vls == 1 ||  in qos_rmt_entries()
  14324: for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)  in qos_rmt_entries()
  14332: n = ilog2(__roundup_pow_of_two(num_vls));  in qos_rmt_entries()
  [all...]

verbs.c
  1501: if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)  in hfi1_check_ah()
  1528: if (ah->vl < num_vls || ah->vl == 15)  in hfi1_notify_new_ah()

chip.h
  783: extern uint num_vls;

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/

qib_iba7322.c
  111: module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
  112: MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  6532: "Invalid num_vls %u, using 4 VLs\n",  in qib_init_7322_variables()
  6544: "Invalid num_vls %u for MTU %d , using 4 VLs\n",  in qib_init_7322_variables()

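The qib hits expose the same knob as a module parameter (num_vls, documented as 1-8) and fall back to 4 VLs when the configured value is rejected. A hedged sketch of that style of fallback follows; it is not the driver's actual check, and qib_init_7322_variables() also has an MTU-dependent case (the second message) that is omitted here.

#include <stdio.h>

/*
 * Illustrative fallback only: accept the documented 1-8 range and drop
 * back to 4 VLs otherwise. The real check in qib_init_7322_variables()
 * has additional conditions.
 */
static unsigned int validate_num_vls(unsigned int requested)
{
    if (requested < 1 || requested > 8) {
        fprintf(stderr, "Invalid num_vls %u, using 4 VLs\n", requested);
        return 4;
    }
    return requested;
}

int main(void)
{
    printf("num_vls = %u\n", validate_num_vls(0));
    printf("num_vls = %u\n", validate_num_vls(8));
    return 0;
}
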
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/

qib_iba7322.c
  111: module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
  112: MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  6503: "Invalid num_vls %u, using 4 VLs\n",  in qib_init_7322_variables()
  6515: "Invalid num_vls %u for MTU %d , using 4 VLs\n",  in qib_init_7322_variables()