/kernel/linux/linux-6.6/include/linux/

  random.h
     53  u32 __get_random_u32_below(u32 ceil);
     56  * Returns a random integer in the interval [0, ceil), with uniform
     57  * distribution, suitable for all uses. Fastest when ceil is a constant, but
     58  * still fast for variable ceil as well.
     60  static inline u32 get_random_u32_below(u32 ceil)  (in get_random_u32_below(); argument)
     62  if (!__builtin_constant_p(ceil))  (in get_random_u32_below())
     63  return __get_random_u32_below(ceil);  (in get_random_u32_below())
     66  * For the fast path, below, all operations on ceil are precomputed by  (in get_random_u32_below())
     71  * whose lower half would indicate a range indivisible by ceil.  (in get_random_u32_below())
     73  BUILD_BUG_ON_MSG(!ceil, "get_random_u32_belo  (in get_random_u32_below())
    110  get_random_u32_inclusive(u32 floor, u32 ceil)  (in get_random_u32_inclusive(); argument)
    [remaining matches truncated]

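The comment block above documents get_random_u32_below() as uniform over [0, ceil) and suitable for all uses, with a build-time error for a constant ceil of zero. A minimal caller sketch, assuming kernel code that already includes <linux/random.h>; the helper and its use case are hypothetical, not taken from the listing:

    #include <linux/random.h>

    /* Hypothetical helper: pick a slot index in [0, nr_slots) and a retry
     * delay in [10, 100] ms. ceil must be non-zero for get_random_u32_below(),
     * so guard the variable case explicitly.
     */
    static u32 pick_slot(u32 nr_slots, u32 *delay_ms)
    {
        *delay_ms = get_random_u32_inclusive(10, 100);
        return nr_slots ? get_random_u32_below(nr_slots) : 0;
    }
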
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/en/

  htb.h
     26  u32 parent_classid, u64 rate, u64 ceil,
     30  u64 rate, u64 ceil, struct netlink_ext_ack *extack);
     37  mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,

  htb.c
    270  qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",  (in mlx5e_htb_convert_rate())
    276  static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)  (in mlx5e_htb_convert_ceil(); argument)
    279  *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);  (in mlx5e_htb_convert_ceil())
    281  qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",  (in mlx5e_htb_convert_ceil())
    282  ceil, *max_average_bw);  (in mlx5e_htb_convert_ceil())
    287  u32 parent_classid, u64 rate, u64 ceil,  (in mlx5e_htb_leaf_alloc_queue())
    295  qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",  (in mlx5e_htb_leaf_alloc_queue())
    296  classid, parent_classid, rate, ceil);  (in mlx5e_htb_leaf_alloc_queue())
    314  mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);  (in mlx5e_htb_leaf_alloc_queue())
    343  u64 rate, u64 ceil, struc  (in mlx5e_htb_leaf_to_inner())
    286  mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid, u32 parent_classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack)  (in mlx5e_htb_leaf_alloc_queue(); argument)
    342  mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack)  (in mlx5e_htb_leaf_to_inner(); argument)
    655  mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack)  (in mlx5e_htb_node_modify(); argument)
    [remaining matches truncated]

  qos.c
    412  htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);  (in mlx5e_htb_setup_tc())
    419  htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);  (in mlx5e_htb_setup_tc())
    428  return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,  (in mlx5e_htb_setup_tc())

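mlx5e_htb_convert_ceil() above derives the hardware's max_average_bw (whole Mbit/s, minimum 1) from the HTB ceil given in bytes per second. A small standalone sketch of that arithmetic, assuming BYTES_IN_MBIT is 125000 (one megabit expressed in bytes), which is how this driver appears to define it:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumption: one megabit expressed in bytes, matching the driver's macro. */
    #define BYTES_IN_MBIT 125000ULL

    /* Sketch of the ceil -> max_average_bw conversion: integer-divide the byte
     * rate down to whole Mbit/s and clamp to at least 1 so a tiny ceil still
     * programs a valid cap.
     */
    static uint32_t ceil_to_max_average_bw(uint64_t ceil_bytes_per_sec)
    {
        uint64_t mbits = ceil_bytes_per_sec / BYTES_IN_MBIT;

        return mbits ? (uint32_t)mbits : 1;
    }

    int main(void)
    {
        printf("%u\n", ceil_to_max_average_bw(1500000)); /* 12 Mbit/s      */
        printf("%u\n", ceil_to_max_average_bw(1000));    /* clamps to 1    */
        return 0;
    }
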
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/display/dc/dml/

  dml_inline_defs.h
     80  double ceil = dml_ceil(a, 1);  (in dml_round(); local)
     84  return ceil;  (in dml_round())

/kernel/linux/linux-6.6/net/sched/

  sch_htb.c
      97  struct psched_ratecfg ceil;  (member)
     676  toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);  (in htb_accnt_ctokens())
     691  * borrowing from "level". It accounts bytes to ceil leaky bucket for
    1272  psched_ratecfg_getrate(&opt.ceil, &cl->ceil);  (in htb_dump_class())
    1285  if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&  (in htb_dump_class())
    1286  nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,  (in htb_dump_class())
    1801  if (!hopt->rate.rate || !hopt->ceil.rate)  (in htb_change_class())
    1806  if (hopt->rate.overhead || hopt->ceil.overhead) {  (in htb_change_class())
    1810  if (hopt->rate.mpu || hopt->ceil  (in htb_change_class())
    [remaining matches truncated]

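sch_htb.c keeps two shaping configurations per class: rate and the ceil member shown at line 97. htb_accnt_ctokens() (line 676) charges every transmitted byte against the ceil bucket, which is what the "leaky bucket" comment refers to: a class may keep borrowing from its parent only while its ceil tokens last. A simplified userspace model of that dual-bucket idea, illustrative only and not the kernel's exact arithmetic:

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy model of HTB's dual-bucket accounting (illustrative only):
     * 'tokens' tracks the guaranteed rate, 'ctokens' tracks the ceil.
     * Borrowing is allowed only while ctokens stays positive, so ceil is a
     * hard upper bound even when spare parent bandwidth is available.
     */
    struct toy_class {
        int64_t tokens;    /* credit at 'rate', in ns */
        int64_t ctokens;   /* credit at 'ceil', in ns */
        uint64_t rate_bps; /* guaranteed bytes/sec, non-zero */
        uint64_t ceil_bps; /* maximum bytes/sec, non-zero    */
    };

    /* Transmission time of 'bytes' at 'bps', in ns (cf. psched_l2t_ns()). */
    static int64_t l2t_ns(uint64_t bps, uint32_t bytes)
    {
        return (int64_t)(bytes * 1000000000ULL / bps);
    }

    static void toy_account(struct toy_class *cl, uint32_t bytes)
    {
        cl->tokens  -= l2t_ns(cl->rate_bps, bytes);
        cl->ctokens -= l2t_ns(cl->ceil_bps, bytes); /* cf. htb_accnt_ctokens() */
    }

    static bool toy_may_borrow(const struct toy_class *cl)
    {
        return cl->ctokens > 0; /* still below ceil: borrowing allowed */
    }
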
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/

  qos.c
     119  maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;  (in otx2_config_sched_shaping())
     470  txschq_node->ceil = 0;  (in otx2_qos_alloc_txschq_node())
     500  u16 classid, u32 prio, u64 rate, u64 ceil,  (in otx2_qos_sw_create_leaf_node())
     516  node->ceil = otx2_convert_rate(ceil);  (in otx2_qos_sw_create_leaf_node())
    1203  u32 parent_classid, u64 rate, u64 ceil,  (in otx2_qos_leaf_alloc_queue())
    1213  "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",  (in otx2_qos_leaf_alloc_queue())
    1214  classid, parent_classid, rate, ceil, prio, quantum);  (in otx2_qos_leaf_alloc_queue())
    1283  ceil, quantu  (in otx2_qos_leaf_alloc_queue())
     498  otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent, u16 classid, u32 prio, u64 rate, u64 ceil, u32 quantum, u16 qid, bool static_cfg)  (in otx2_qos_sw_create_leaf_node(); argument)
    1202  otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, u32 parent_classid, u64 rate, u64 ceil, u64 prio, u32 quantum, struct netlink_ext_ack *extack)  (in otx2_qos_leaf_alloc_queue(); argument)
    1341  otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, u16 child_classid, u64 rate, u64 ceil, u64 prio, u32 quantum, struct netlink_ext_ack *extack)  (in otx2_qos_leaf_to_inner(); argument)
    [remaining matches truncated]

  qos.h
      60  u64 ceil;  (member)

/kernel/linux/linux-5.10/drivers/iio/light/

  lv0104cs.c
    278  int floor, ceil, mid;  (in lv0104cs_set_calibscale(); local)
    285  ceil = lv0104cs_calibscales[i + 1].val * 1000000  (in lv0104cs_set_calibscale())
    287  mid = (floor + ceil) / 2;  (in lv0104cs_set_calibscale())
    296  if (calibscale >= mid && calibscale <= ceil) {  (in lv0104cs_set_calibscale())

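lv0104cs_set_calibscale() above rounds a requested calibration scale to the nearest supported table entry by comparing it against the midpoint between the neighbouring floor and ceil values. A self-contained sketch of that nearest-entry selection, using made-up table values rather than the driver's lv0104cs_calibscales[]:

    #include <stdio.h>

    /* Hypothetical calibration table in micro-units (the real driver builds
     * the comparison values from lv0104cs_calibscales[]).
     */
    static const int cal_table[] = { 500000, 1000000, 2000000, 4000000 };

    /* Return the index of the table entry nearest to 'req' by checking which
     * side of the floor/ceil midpoint the request falls on.
     */
    static int nearest_calibscale(int req)
    {
        for (unsigned int i = 0; i + 1 < sizeof(cal_table) / sizeof(cal_table[0]); i++) {
            int floor = cal_table[i];
            int ceil = cal_table[i + 1];
            int mid = (floor + ceil) / 2;

            if (req >= floor && req < mid)
                return i;      /* closer to the lower entry */
            if (req >= mid && req <= ceil)
                return i + 1;  /* closer to the upper entry */
        }
        return -1;             /* out of range */
    }

    int main(void)
    {
        printf("%d\n", nearest_calibscale(1400000)); /* index 1 (1000000) */
        printf("%d\n", nearest_calibscale(1600000)); /* index 2 (2000000) */
        return 0;
    }
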
/kernel/linux/linux-6.6/drivers/iio/light/

  lv0104cs.c
    278  int floor, ceil, mid;  (in lv0104cs_set_calibscale(); local)
    285  ceil = lv0104cs_calibscales[i + 1].val * 1000000  (in lv0104cs_set_calibscale())
    287  mid = (floor + ceil) / 2;  (in lv0104cs_set_calibscale())
    296  if (calibscale >= mid && calibscale <= ceil) {  (in lv0104cs_set_calibscale())

/kernel/linux/linux-5.10/net/sched/

  sch_htb.c
      97  struct psched_ratecfg ceil;  (member)
     640  toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);  (in htb_accnt_ctokens())
     651  * borrowing from "level". It accounts bytes to ceil leaky bucket for
    1098  psched_ratecfg_getrate(&opt.ceil, &cl->ceil);  (in htb_dump_class())
    1109  if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&  (in htb_dump_class())
    1110  nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,  (in htb_dump_class())
    1327  if (!hopt->rate.rate || !hopt->ceil.rate)  (in htb_change_class())
    1335  if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)  (in htb_change_class())
    1336  qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, t  (in htb_change_class())
    [remaining matches truncated]

/kernel/linux/linux-6.6/drivers/char/

  random.c
    186  * u32 get_random_u32_below(u32 ceil)
    188  * u32 get_random_u32_inclusive(u32 floor, u32 ceil)
    535  u32 __get_random_u32_below(u32 ceil)  (in DEFINE_BATCHED_ENTROPY())
    538  * This is the slow path for variable ceil. It is still fast, most of  (in DEFINE_BATCHED_ENTROPY())
    540  * opportunistically comparing the lower half to ceil itself, before  (in DEFINE_BATCHED_ENTROPY())
    542  * whose lower half would indicate a range indivisible by ceil. The use  (in DEFINE_BATCHED_ENTROPY())
    543  * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable  (in DEFINE_BATCHED_ENTROPY())
    550  * This function is technically undefined for ceil  (in DEFINE_BATCHED_ENTROPY())
    [remaining matches truncated]

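The random.c comments describe the variable-ceil slow path: multiply a 32-bit random value by ceil, keep the top 32 bits, and reject only the few samples whose low half falls in the range that 2^32 does not divide evenly, using -ceil % ceil as a 32-bit-friendly stand-in for 2^32 % ceil. A userspace sketch of that multiply-and-reject technique (not the kernel's exact code; rand32() is a placeholder for any uniform 32-bit source):

    #include <stdint.h>
    #include <stdlib.h>

    /* Placeholder for a uniform 32-bit source such as get_random_u32(). */
    static uint32_t rand32(void)
    {
        return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
    }

    /* Multiply-and-reject bounded random: uniform in [0, ceil), ceil > 0. */
    static uint32_t random_below(uint32_t ceil)
    {
        uint64_t mult = (uint64_t)ceil * rand32();

        /* Cheap pre-check: only a low half below ceil can possibly be biased... */
        if ((uint32_t)mult < ceil) {
            uint32_t bound = -ceil % ceil; /* == 2^32 % ceil */

            /* ...and only samples below 2^32 % ceil must actually be redrawn. */
            while ((uint32_t)mult < bound)
                mult = (uint64_t)ceil * rand32();
        }
        return mult >> 32;
    }
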
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/linux/

  pkt_sched.h
    243  struct tc_ratespec ceil;  (member)

/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/

  kvm_util.c
    1830  bool ceil)  (in vm_calc_num_pages())
    1837  return num_pages / n + !!(ceil && num_pages % n);
    1827  vm_calc_num_pages(unsigned int num_pages, unsigned int page_shift, unsigned int new_page_shift, bool ceil)  (in vm_calc_num_pages(); argument)

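vm_calc_num_pages() converts a page count from one page size to another and, when its ceil flag is set, rounds the division up rather than truncating: the `+ !!(ceil && num_pages % n)` term adds one page whenever there is a remainder. A small sketch of the same round-up-on-remainder idiom, with hypothetical shift values and assuming the target pages are at least as large as the source pages:

    #include <stdbool.h>
    #include <stdio.h>

    /* Convert 'num_pages' of size 2^page_shift into pages of size
     * 2^new_page_shift, rounding up when 'ceil' is set so a partial page
     * still counts. Assumes new_page_shift >= page_shift.
     */
    static unsigned int calc_num_pages(unsigned int num_pages, unsigned int page_shift,
                                       unsigned int new_page_shift, bool ceil)
    {
        unsigned int n = 1u << (new_page_shift - page_shift);

        return num_pages / n + !!(ceil && num_pages % n);
    }

    int main(void)
    {
        /* 4 KiB pages (shift 12) -> 64 KiB pages (shift 16): n = 16. */
        printf("%u\n", calc_num_pages(33, 12, 16, false)); /* 2 (truncated)  */
        printf("%u\n", calc_num_pages(33, 12, 16, true));  /* 3 (rounded up) */
        return 0;
    }
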
/kernel/linux/linux-5.10/tools/include/uapi/linux/

  pkt_sched.h
    391  struct tc_ratespec ceil;  (member)

/kernel/linux/linux-5.10/include/uapi/linux/

  pkt_sched.h
    411  struct tc_ratespec ceil;  (member)

/kernel/linux/linux-6.6/include/uapi/linux/

  pkt_sched.h
    411  struct tc_ratespec ceil;  (member)

/kernel/linux/linux-6.6/tools/include/uapi/linux/

  pkt_sched.h
    391  struct tc_ratespec ceil;  (member)

/kernel/linux/patches/linux-5.10/prebuilts/usr/include/linux/

  pkt_sched.h
    285  struct tc_ratespec ceil;  (member)

/kernel/linux/patches/linux-6.6/prebuilts/usr/include/linux/

  pkt_sched.h
    285  struct tc_ratespec ceil;  (member)

/kernel/linux/linux-6.6/include/net/

  pkt_cls.h
    858  u64 ceil;  (member)

/kernel/linux/linux-5.10/drivers/media/tuners/

  mt2063.c
    354  #define ceil(n, d) (((n) < 0) ? (-((-(n))/(d))) : (n)/(d) + ((n)%(d) != 0))  (macro)
    610  /* ceil function */  (in MT2063_ChooseFirstIF())
    612  ceil((s32) (pNode->max_ - f_Center), (s32) f_Step);  (in MT2063_ChooseFirstIF())

/kernel/linux/linux-6.6/drivers/media/tuners/

  mt2063.c
    354  #define ceil(n, d) (((n) < 0) ? (-((-(n))/(d))) : (n)/(d) + ((n)%(d) != 0))  (macro)
    610  /* ceil function */  (in MT2063_ChooseFirstIF())
    612  ceil((s32) (pNode->max_ - f_Center), (s32) f_Step);  (in MT2063_ChooseFirstIF())

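The mt2063.c ceil(n, d) macro implements signed integer ceiling division: for a negative numerator it divides the magnitudes and negates (truncation toward zero already rounds up for negatives), and for a non-negative numerator it adds one whenever the remainder is non-zero. A quick check of those two branches, assuming a positive divisor and using a renamed copy of the macro:

    #include <assert.h>
    #include <stdio.h>

    /* Same shape as the mt2063.c macro: ceiling of n/d for signed n, d > 0. */
    #define ceil_div(n, d) (((n) < 0) ? (-((-(n)) / (d))) : (n) / (d) + ((n) % (d) != 0))

    int main(void)
    {
        assert(ceil_div(7, 2) == 4);   /* positive: round up on remainder    */
        assert(ceil_div(6, 2) == 3);   /* exact division: no adjustment      */
        assert(ceil_div(-7, 2) == -3); /* negative: truncation already ceils */
        printf("ok\n");
        return 0;
    }
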
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/

  kvm_util.c
    2017  bool ceil)  (in vm_calc_num_pages())
    2024  return num_pages / n + !!(ceil && num_pages % n);
    2014  vm_calc_num_pages(unsigned int num_pages, unsigned int page_shift, unsigned int new_page_shift, bool ceil)  (in vm_calc_num_pages(); argument)

/kernel/linux/linux-6.6/crypto/

  testmgr.c
    880  static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)  (in prandom_u32_below(); argument)
    883  * This is slightly biased for non-power-of-2 values of 'ceil', but this  (in prandom_u32_below())
    886  return prandom_u32_state(rng) % ceil;  (in prandom_u32_below())
    895  u32 floor, u32 ceil)  (in prandom_u32_inclusive())
    897  return floor + prandom_u32_below(rng, ceil - floor + 1);  (in prandom_u32_inclusive())
    894  prandom_u32_inclusive(struct rnd_state *rng, u32 floor, u32 ceil)  (in prandom_u32_inclusive(); argument)

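Unlike get_random_u32_below(), the testmgr.c helpers simply reduce a pseudo-random value modulo ceil, accepting a slight bias for non-power-of-two ceilings because the fuzz tests only need variety, not strict uniformity; the inclusive variant maps [floor, ceil] onto a range of width ceil - floor + 1. A standalone sketch of the same pair, with a trivial stand-in for prandom_u32_state():

    #include <stdint.h>
    #include <stdio.h>

    /* Tiny xorshift PRNG as a stand-in for prandom_u32_state(); quality is
     * irrelevant here, only the bounding arithmetic is being illustrated.
     */
    static uint32_t xorshift32(uint32_t *state)
    {
        uint32_t x = *state;

        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        return *state = x;
    }

    /* Modulo bounding: good enough for fuzzing, slightly biased when ceil is
     * not a power of two. ceil must be non-zero.
     */
    static uint32_t u32_below(uint32_t *state, uint32_t ceil)
    {
        return xorshift32(state) % ceil;
    }

    /* Closed range [floor, ceil]: the width is ceil - floor + 1. */
    static uint32_t u32_inclusive(uint32_t *state, uint32_t floor, uint32_t ceil)
    {
        return floor + u32_below(state, ceil - floor + 1);
    }

    int main(void)
    {
        uint32_t state = 0x12345678;

        printf("%u %u\n", u32_below(&state, 10), u32_inclusive(&state, 5, 9));
        return 0;
    }
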