/kernel/linux/linux-5.10/drivers/crypto/ccp/
  ccp-crypto-main.c
     55: struct list_head *backlog;    [member]
     96: ccp_crypto_cmd_complete(struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)    [ccp_crypto_cmd_complete() argument]
     97: struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)    [in ccp_crypto_cmd_complete()]
    102: *backlog = NULL;    [in ccp_crypto_cmd_complete()]
    117: /* Process the backlog:    [in ccp_crypto_cmd_complete()]
    119:  * special precautions have to be taken when handling the backlog.    [in ccp_crypto_cmd_complete()]
    121: if (req_queue.backlog != &req_queue.cmds) {    [in ccp_crypto_cmd_complete()]
    122: /* Skip over this cmd if it is the next backlog cmd */    [in ccp_crypto_cmd_complete()]
    123: if (req_queue.backlog == &crypto_cmd->entry)    [in ccp_crypto_cmd_complete()]
    124: req_queue.backlog = crypto_cmd->entry.next;    [in ccp_crypto_cmd_complete()]
    126: *backlog ...    [in ccp_crypto_cmd_complete()]
    147: struct ccp_crypto_cmd *held, *next, *backlog;    [ccp_crypto_complete() local]
    [all...]
  ccp-dev.c
    280:  * backlogged) or advancement out of the backlog. If the cmd has
    281:  * advanced out of the backlog the "err" value of the callback
    315: list_add_tail(&cmd->entry, &ccp->backlog);    [in ccp_enqueue_cmd()]
    378: struct ccp_cmd *backlog = NULL;    [ccp_dequeue_cmd() local]
    403: if (!list_empty(&ccp->backlog)) {    [in ccp_dequeue_cmd()]
    404: backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,    [in ccp_dequeue_cmd()]
    406: list_del(&backlog->entry);    [in ccp_dequeue_cmd()]
    411: if (backlog) {    [in ccp_dequeue_cmd()]
    412: INIT_WORK(&backlog ...    [in ccp_dequeue_cmd()]
    [all...]
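The two CCP entries above sketch a queue-with-overflow pattern: ccp_enqueue_cmd() parks a command on ccp->backlog when the active queue is full, and ccp_dequeue_cmd() promotes the first backlogged command into the slot a completed command freed. A minimal userspace sketch of that handoff follows; the struct cmd and FIFO helpers are stand-ins invented for illustration (the driver uses list_head, returns -EBUSY for backlogged commands, and signals advancement with an -EINPROGRESS callback).

    #include <stdio.h>

    struct cmd { int id; struct cmd *next; };

    struct cmd_queue {
        struct cmd *head, *tail;       /* active FIFO */
        struct cmd *bl_head, *bl_tail; /* backlog FIFO */
        int len, max_len;
    };

    static void fifo_push(struct cmd **head, struct cmd **tail, struct cmd *c)
    {
        c->next = NULL;
        if (*tail)
            (*tail)->next = c;
        else
            *head = c;
        *tail = c;
    }

    static struct cmd *fifo_pop(struct cmd **head, struct cmd **tail)
    {
        struct cmd *c = *head;

        if (c) {
            *head = c->next;
            if (!*head)
                *tail = NULL;
        }
        return c;
    }

    /* Returns 1 when the command had to be backlogged; the real driver
     * returns -EBUSY here and later invokes the callback with -EINPROGRESS
     * once the cmd advances (see the ccp-dev.c comments at 280-281). */
    static int enqueue_cmd(struct cmd_queue *q, struct cmd *c)
    {
        if (q->len >= q->max_len) {
            fifo_push(&q->bl_head, &q->bl_tail, c);  /* like line 315 */
            return 1;
        }
        fifo_push(&q->head, &q->tail, c);
        q->len++;
        return 0;
    }

    /* Like ccp_dequeue_cmd(): take the next active command, then promote
     * one backlogged command into the freed slot (lines 403-412). */
    static struct cmd *dequeue_cmd(struct cmd_queue *q)
    {
        struct cmd *c = fifo_pop(&q->head, &q->tail);
        struct cmd *bl = fifo_pop(&q->bl_head, &q->bl_tail);

        if (c)
            q->len--;
        if (bl) {
            fifo_push(&q->head, &q->tail, bl);
            q->len++;
        }
        return c;
    }

    int main(void)
    {
        struct cmd_queue q = { .max_len = 2 };
        struct cmd a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        enqueue_cmd(&q, &a);
        enqueue_cmd(&q, &b);
        printf("cmd 3 backlogged: %d\n", enqueue_cmd(&q, &c));  /* 1 */
        printf("dequeued cmd %d, len now %d\n", dequeue_cmd(&q)->id, q.len);
        return 0;
    }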
/kernel/linux/linux-6.6/drivers/crypto/intel/qat/qat_common/
  qat_algs_send.c
     24: void qat_alg_send_backlog(struct qat_instance_backlog *backlog)    [qat_alg_send_backlog() argument]
     28: spin_lock_bh(&backlog->lock);    [in qat_alg_send_backlog()]
     29: list_for_each_entry_safe(req, tmp, &backlog->list, list) {    [in qat_alg_send_backlog()]
     40: spin_unlock_bh(&backlog->lock);    [in qat_alg_send_backlog()]
     45: struct qat_instance_backlog *backlog = req->backlog;    [qat_alg_try_enqueue() local]
     50: if (!list_empty(&backlog->list))    [in qat_alg_try_enqueue()]
     67: struct qat_instance_backlog *backlog = req->backlog;    [qat_alg_send_message_maybacklog() local]
     73: spin_lock_bh(&backlog ...    [in qat_alg_send_message_maybacklog()]
    [all...]
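The qat_algs_send.c hits outline the QAT "maybacklog" submit path: a request goes straight to the hardware ring only while the backlog list is empty (the line-50 check preserves FIFO order); otherwise it is appended under backlog->lock, and qat_alg_send_backlog() drains the list when responses free ring slots. The sketch below models that logic with invented stand-in types and a pthread mutex in place of spin_lock_bh():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct req { int id; struct req *next; };

    struct instance_backlog {
        pthread_mutex_t lock;   /* protects the backlog list */
        struct req *head, *tail;
        int ring_free;          /* stand-in for HW ring capacity */
    };

    static bool ring_try_put(struct instance_backlog *b, struct req *r)
    {
        if (!b->ring_free)
            return false;
        b->ring_free--;
        printf("submitted req %d\n", r->id);
        return true;
    }

    /* Mirrors qat_alg_send_message_maybacklog(): direct submission is
     * skipped whenever older requests are already backlogged, so order
     * is preserved. */
    static void send_maybacklog(struct instance_backlog *b, struct req *r)
    {
        pthread_mutex_lock(&b->lock);
        if (!b->head && ring_try_put(b, r)) {  /* fast path: backlog empty */
            pthread_mutex_unlock(&b->lock);
            return;
        }
        r->next = NULL;                        /* slow path: queue in order */
        if (b->tail)
            b->tail->next = r;
        else
            b->head = r;
        b->tail = r;
        pthread_mutex_unlock(&b->lock);
    }

    /* Mirrors qat_alg_send_backlog(): called once responses free slots. */
    static void send_backlog(struct instance_backlog *b)
    {
        pthread_mutex_lock(&b->lock);
        while (b->head && ring_try_put(b, b->head)) {
            b->head = b->head->next;
            if (!b->head)
                b->tail = NULL;
        }
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct instance_backlog b = {
            .lock = PTHREAD_MUTEX_INITIALIZER, .ring_free = 1,
        };
        struct req r1 = { .id = 1 }, r2 = { .id = 2 };

        send_maybacklog(&b, &r1);  /* ring has room: submitted directly */
        send_maybacklog(&b, &r2);  /* ring full: goes to the backlog */
        b.ring_free = 1;           /* a response freed a slot */
        send_backlog(&b);          /* drains req 2 in order */
        return 0;
    }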
  qat_algs_send.h
     11: spinlock_t lock; /* protects backlog list */
     19: struct qat_instance_backlog *backlog;    [member]
     23: void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
/kernel/linux/linux-6.6/drivers/crypto/ccp/
  ccp-crypto-main.c
     55: struct list_head *backlog;    [member]
     89: ccp_crypto_cmd_complete(struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)    [ccp_crypto_cmd_complete() argument]
     90: struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)    [in ccp_crypto_cmd_complete()]
     95: *backlog = NULL;    [in ccp_crypto_cmd_complete()]
    110: /* Process the backlog:    [in ccp_crypto_cmd_complete()]
    112:  * special precautions have to be taken when handling the backlog.    [in ccp_crypto_cmd_complete()]
    114: if (req_queue.backlog != &req_queue.cmds) {    [in ccp_crypto_cmd_complete()]
    115: /* Skip over this cmd if it is the next backlog cmd */    [in ccp_crypto_cmd_complete()]
    116: if (req_queue.backlog == &crypto_cmd->entry)    [in ccp_crypto_cmd_complete()]
    117: req_queue.backlog = crypto_cmd->entry.next;    [in ccp_crypto_cmd_complete()]
    119: *backlog ...    [in ccp_crypto_cmd_complete()]
    140: struct ccp_crypto_cmd *held, *next, *backlog;    [ccp_crypto_complete() local]
    [all...]
  ccp-dev.c
    280:  * backlogged) or advancement out of the backlog. If the cmd has
    281:  * advanced out of the backlog the "err" value of the callback
    315: list_add_tail(&cmd->entry, &ccp->backlog);    [in ccp_enqueue_cmd()]
    378: struct ccp_cmd *backlog = NULL;    [ccp_dequeue_cmd() local]
    403: if (!list_empty(&ccp->backlog)) {    [in ccp_dequeue_cmd()]
    404: backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,    [in ccp_dequeue_cmd()]
    406: list_del(&backlog->entry);    [in ccp_dequeue_cmd()]
    411: if (backlog) {    [in ccp_dequeue_cmd()]
    412: INIT_WORK(&backlog ...    [in ccp_dequeue_cmd()]
    [all...]
/kernel/linux/linux-5.10/tools/testing/selftests/net/forwarding/
  sch_red.sh
      9: # to go to backlog.
    164: qdisc_stats_get $swp3 11: .backlog
    190: # This sends traffic in an attempt to build a backlog of $size. Returns 0 on
    192: # backlog size to stdout.
    258: local backlog
    261: # Build the below-the-limit backlog using UDP. We could use TCP just
    266: backlog=$(build_backlog $((2 * limit / 3)) udp)
    267: check_err $? "Could not build the requested backlog"
    269: check_err $? "backlog $backlog / ...
    [all...]
/kernel/linux/linux-6.6/tools/testing/selftests/net/forwarding/
  sch_red.sh
      9: # to go to backlog.
    164: qdisc_stats_get $swp3 11: .backlog
    190: # This sends traffic in an attempt to build a backlog of $size. Returns 0 on
    192: # backlog size to stdout.
    258: local backlog
    261: # Build the below-the-limit backlog using UDP. We could use TCP just
    266: backlog=$(build_backlog $((2 * limit / 3)) udp)
    267: check_err $? "Could not build the requested backlog"
    269: check_err $? "backlog $backlog / ...
    [all...]
/kernel/linux/linux-5.10/tools/perf/ui/gtk/
  helpline.c
     32: static int backlog;    [gtk_helpline_show() local]
     34: ret = vscnprintf(ui_helpline__current + backlog,    [in gtk_helpline_show()]
     35: sizeof(ui_helpline__current) - backlog, fmt, ap);    [in gtk_helpline_show()]
     36: backlog += ret;    [in gtk_helpline_show()]
     40: if (ptr && (ptr - ui_helpline__current) <= backlog) {    [in gtk_helpline_show()]
     43: backlog = 0;    [in gtk_helpline_show()]
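All four perf helpline variants below use the same idiom: a static backlog offset accumulates partial vscnprintf() output in a message buffer until the caller supplies a terminating newline, at which point the line is flushed and the offset resets. A self-contained sketch; the buffer size and flush_line() are assumptions, and vsnprintf plus a clamp stands in for the kernel's vscnprintf:

    #include <stdarg.h>
    #include <stdio.h>

    static char last_msg[512];
    static int backlog;          /* bytes already buffered */

    static void flush_line(const char *msg)
    {
        printf("helpline: %s", msg);
    }

    static int helpline_show(const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vsnprintf(last_msg + backlog, sizeof(last_msg) - backlog,
                        fmt, ap);
        va_end(ap);
        /* vscnprintf returns bytes actually written; clamp to match. */
        if (ret > (int)(sizeof(last_msg) - backlog) - 1)
            ret = (int)(sizeof(last_msg) - backlog) - 1;
        backlog += ret;

        /* As in tui_helpline__show(): only a trailing newline completes
         * the message; one line may be built from several calls. */
        if (backlog && last_msg[backlog - 1] == '\n') {
            flush_line(last_msg);
            backlog = 0;
        }
        return ret;
    }

    int main(void)
    {
        helpline_show("loading %s", "perf.data");
        helpline_show(" ... done\n");   /* completes and flushes the line */
        return 0;
    }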
/kernel/linux/linux-5.10/tools/perf/ui/tui/
  helpline.c
     34: static int backlog;    [tui_helpline__show() local]
     37: ret = vscnprintf(ui_helpline__last_msg + backlog,    [in tui_helpline__show()]
     38: sizeof(ui_helpline__last_msg) - backlog, format, ap);    [in tui_helpline__show()]
     39: backlog += ret;    [in tui_helpline__show()]
     43: if (ui_helpline__last_msg[backlog - 1] == '\n') {    [in tui_helpline__show()]
     46: backlog = 0;    [in tui_helpline__show()]
/kernel/linux/linux-6.6/tools/perf/ui/tui/
  helpline.c
     33: static int backlog;    [tui_helpline__show() local]
     36: ret = vscnprintf(ui_helpline__last_msg + backlog,    [in tui_helpline__show()]
     37: sizeof(ui_helpline__last_msg) - backlog, format, ap);    [in tui_helpline__show()]
     38: backlog += ret;    [in tui_helpline__show()]
     42: if (ui_helpline__last_msg[backlog - 1] == '\n') {    [in tui_helpline__show()]
     45: backlog = 0;    [in tui_helpline__show()]
/kernel/linux/linux-6.6/tools/perf/ui/gtk/
  helpline.c
     32: static int backlog;    [gtk_helpline_show() local]
     34: ret = vscnprintf(ui_helpline__current + backlog,    [in gtk_helpline_show()]
     35: sizeof(ui_helpline__current) - backlog, fmt, ap);    [in gtk_helpline_show()]
     36: backlog += ret;    [in gtk_helpline_show()]
     40: if (ptr && (ptr - ui_helpline__current) <= backlog) {    [in gtk_helpline_show()]
     43: backlog = 0;    [in gtk_helpline_show()]
/kernel/linux/linux-5.10/include/net/
  fq_impl.h
     22: flow->backlog -= skb->len;    [in fq_adjust_removal()]
     23: fq->backlog--;    [in fq_adjust_removal()]
     31: if (flow->backlog == 0) {    [in fq_rejigger_backlog()]
     37: if (i->backlog < flow->backlog)    [in fq_rejigger_backlog()]
    150: if (i->backlog > flow->backlog)    [in fq_recalc_backlog()]
    170: flow->backlog += skb->len;    [in fq_tin_enqueue()]
    174: fq->backlog++;    [in fq_tin_enqueue()]
    186: while (fq->backlog > f ...    [in fq_tin_enqueue()]
    [all...]
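fq_impl.h keeps two backlog counters with different units: flow->backlog counts bytes per flow, while fq->backlog counts packets across the whole fq; when the packet count exceeds fq->limit, packets are dropped from the flow with the largest byte backlog (fq_find_fattest_flow() in the 6.6 entry further down). A simplified sketch of that accounting, with a fixed flow array and no skbs:

    #include <stdio.h>

    #define NFLOWS 4

    struct flow { unsigned int backlog; };   /* bytes queued in this flow */

    struct fq {
        struct flow flows[NFLOWS];
        unsigned int backlog;                /* packets queued overall */
        unsigned int limit;                  /* packet limit */
    };

    static void fq_enqueue(struct fq *fq, int flow_idx, unsigned int len)
    {
        fq->flows[flow_idx].backlog += len;  /* bytes, as at line 170 */
        fq->backlog++;                       /* packets, as at line 174 */
    }

    /* Like fq_find_fattest_flow(): the drop victim on overlimit is the
     * flow holding the most bytes. */
    static struct flow *fq_find_fattest_flow(struct fq *fq)
    {
        struct flow *fat = &fq->flows[0];

        for (int i = 1; i < NFLOWS; i++)
            if (fq->flows[i].backlog > fat->backlog)
                fat = &fq->flows[i];
        return fat;
    }

    int main(void)
    {
        struct fq fq = { .limit = 2 };

        fq_enqueue(&fq, 0, 1500);
        fq_enqueue(&fq, 1, 60);
        fq_enqueue(&fq, 0, 1500);
        if (fq.backlog > fq.limit)   /* the line-186 overlimit loop */
            printf("overlimit: drop from flow with %u bytes\n",
                   fq_find_fattest_flow(&fq)->backlog);   /* 3000 */
        return 0;
    }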
  codel_impl.h
    100: codel_should_drop(const struct sk_buff *skb, void *ctx, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, u32 *backlog, codel_time_t now)    [codel_should_drop() argument]
    107: u32 *backlog,    [in codel_should_drop()]
    125: *backlog <= params->mtu) {    [in codel_should_drop()]
    142: codel_dequeue(void *ctx, u32 *backlog, struct codel_params *params, struct codel_vars *vars, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, codel_skb_drop_t drop_func, codel_skb_dequeue_t dequeue_func)    [codel_dequeue() argument]
    143: u32 *backlog,    [in codel_dequeue()]
    162: skb_len_func, skb_time_func, backlog, now);    [in codel_dequeue()]
    172:  * A large backlog might result in drop rates so high    [in codel_dequeue()]
    198: backlog, now)) {    [in codel_dequeue()]
    223: skb_time_func, backlog, now);    [in codel_dequeue()]
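codel_dequeue() takes a u32 *backlog pointer so the drop decision can see current queue depth: codel_should_drop() refuses to treat a packet as over-target when the backlog fits within one MTU (the line-125 hit), since a standing delay cannot then be this queue's fault. A reduced sketch of just that check; the real helper also tracks vars->first_above_time and derives the sojourn time from skb timestamps:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct codel_params {
        uint32_t target_us;   /* acceptable standing delay */
        uint32_t mtu;         /* device MTU in bytes */
    };

    static bool codel_should_drop(uint32_t sojourn_us, uint32_t backlog_bytes,
                                  const struct codel_params *p)
    {
        /* Matches the guard at the codel_impl.h hits: with at most one
         * MTU queued, delay cannot be blamed on this queue. */
        if (sojourn_us < p->target_us || backlog_bytes <= p->mtu)
            return false;
        return true;
    }

    int main(void)
    {
        struct codel_params p = { .target_us = 5000, .mtu = 1514 };

        printf("%d\n", codel_should_drop(8000, 1000, &p));   /* 0: tiny queue */
        printf("%d\n", codel_should_drop(8000, 40000, &p));  /* 1 */
        return 0;
    }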
/kernel/linux/linux-5.10/tools/testing/selftests/drivers/net/mlxsw/
  sch_red_core.sh
      8: # to the backlog. Any extra packets sent should almost 1:1 go to backlog. That
      9: # is what H2 is used for--it sends the extra traffic to create backlog.
    316: qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .backlog
    353: # This sends traffic in an attempt to build a backlog of $size. Returns 0 on
    355: # backlog size to stdout.
    409: local backlog
    412: # Build the below-the-limit backlog using UDP. We could use TCP just
    417: backlog=$(build_backlog $vlan $((2 * limit / 3)) udp)
    418: check_err $? "Could not build the requested backlog"
    [all...]
/kernel/linux/linux-5.10/net/sched/
  sch_pie.c
     33: pie_drop_early(struct Qdisc *sch, struct pie_params *params, struct pie_vars *vars, u32 backlog, u32 packet_size)    [pie_drop_early() argument]
     34: struct pie_vars *vars, u32 backlog, u32 packet_size)    [in pie_drop_early()]
     54: if (backlog < 2 * mtu)    [in pie_drop_early()]
     96: if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,    [in pie_qdisc_enqueue()]
    208: pie_process_dequeue(struct sk_buff *skb, struct pie_params *params, struct pie_vars *vars, u32 backlog)    [pie_process_dequeue() argument]
    209: struct pie_vars *vars, u32 backlog)    [in pie_process_dequeue()]
    225: if (backlog == 0)    [in pie_process_dequeue()]
    238: if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {    [in pie_process_dequeue()]
    277: if (backlog < QUEUE_THRESHOLD) {    [in pie_process_dequeue()]
    300: pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, u32 backlog)    [pie_calculate_probability() argument]
    301: u32 backlog)    [in pie_calculate_probability()]
    316: qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;    [in pie_calculate_probability()]
    324: /* If qdelay is zero and backlog i ...    [in pie_calculate_probability()]
    [all...]
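The sch_pie.c hits show the two places PIE consults the byte backlog: pie_drop_early() bypasses early drop while fewer than two MTUs are queued (line 54), and pie_calculate_probability() estimates queue delay as (backlog << PIE_SCALE) / avg_dq_rate (line 316). A sketch with simplified units; PIE_SCALE is taken as 8 to match the kernel source, and the rate units here are an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PIE_SCALE 8   /* fixed-point shift, as in the kernel's PIE code */

    /* Queue delay estimate from the line-316 hit: with an average dequeue
     * rate in bytes per time unit (already << PIE_SCALE), the delay is
     * (backlog << PIE_SCALE) / avg_dq_rate time units. */
    static uint64_t pie_qdelay(uint32_t backlog, uint32_t avg_dq_rate)
    {
        if (!avg_dq_rate)
            return 0;
        return ((uint64_t)backlog << PIE_SCALE) / avg_dq_rate;
    }

    /* The line-54 guard: never early-drop while fewer than two MTUs are
     * queued, so short bursts pass through untouched. */
    static int pie_drop_early(uint32_t backlog, uint32_t mtu,
                              uint32_t prob, uint32_t rnd)
    {
        if (backlog < 2 * mtu)
            return 0;
        return rnd < prob;   /* drop with probability prob / 2^32 */
    }

    int main(void)
    {
        /* 60000 B queued, draining 1500 B per time unit: delay 40 units. */
        printf("qdelay = %llu\n", (unsigned long long)
               pie_qdelay(60000, 1500 << PIE_SCALE));
        printf("drop? %d\n",
               pie_drop_early(1000, 1514, 0x80000000u, 0x1000u));  /* 0 */
        return 0;
    }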
  sch_gred.c
     39: u32 backlog; /* bytes on the virtualQ */    [member]
    117: return sch->qstats.backlog;    [in gred_backlog()]
    119: return q->backlog;    [in gred_backlog()]
    181: if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=    [in gred_enqueue()]
    247: q->backlog += qdisc_pkt_len(skb);    [in gred_enqueue()]
    272: net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",    [in gred_dequeue()]
    275: q->backlog -= qdisc_pkt_len(skb);    [in gred_dequeue()]
    278: if (!sch->qstats.backlog)    [in gred_dequeue()]
    281: if (!q->backlog)    [in gred_dequeue()]
    306: q->backlog ...    [in gred_reset()]
    [all...]
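GRED maintains a byte backlog per virtual queue (the line-39 member) alongside the qdisc-wide sch->qstats.backlog, and gred_backlog() selects between them at lines 117-119: WRED mode rates packets against the shared queue depth, plain GRED against the packet's own virtual queue. A minimal sketch of that split, with invented simplified types:

    #include <stdbool.h>
    #include <stdio.h>

    struct vq { unsigned int backlog; };   /* bytes on this virtual queue */

    struct gred {
        bool wred_mode;
        unsigned int backlog;   /* qdisc-wide bytes (sch->qstats.backlog) */
        struct vq vq[4];
    };

    /* Mirrors gred_backlog(): pick the queue depth RED should see. */
    static unsigned int gred_backlog(const struct gred *g, const struct vq *q)
    {
        return g->wred_mode ? g->backlog : q->backlog;
    }

    static void gred_enqueue(struct gred *g, struct vq *q, unsigned int len)
    {
        q->backlog += len;   /* per-VQ accounting, as at line 247 */
        g->backlog += len;   /* qdisc-wide accounting */
    }

    int main(void)
    {
        struct gred g = { .wred_mode = false };

        gred_enqueue(&g, &g.vq[0], 1500);
        gred_enqueue(&g, &g.vq[1], 300);
        printf("plain GRED view of VQ0: %u\n", gred_backlog(&g, &g.vq[0]));
        g.wred_mode = true;
        printf("WRED view of VQ0: %u\n", gred_backlog(&g, &g.vq[0]));
        return 0;
    }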
/kernel/linux/linux-6.6/net/sched/
  sch_pie.c
     33: pie_drop_early(struct Qdisc *sch, struct pie_params *params, struct pie_vars *vars, u32 backlog, u32 packet_size)    [pie_drop_early() argument]
     34: struct pie_vars *vars, u32 backlog, u32 packet_size)    [in pie_drop_early()]
     54: if (backlog < 2 * mtu)    [in pie_drop_early()]
     96: if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,    [in pie_qdisc_enqueue()]
    205: pie_process_dequeue(struct sk_buff *skb, struct pie_params *params, struct pie_vars *vars, u32 backlog)    [pie_process_dequeue() argument]
    206: struct pie_vars *vars, u32 backlog)    [in pie_process_dequeue()]
    222: if (backlog == 0)    [in pie_process_dequeue()]
    235: if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {    [in pie_process_dequeue()]
    274: if (backlog < QUEUE_THRESHOLD) {    [in pie_process_dequeue()]
    297: pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, u32 backlog)    [pie_calculate_probability() argument]
    298: u32 backlog)    [in pie_calculate_probability()]
    313: qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;    [in pie_calculate_probability()]
    321: /* If qdelay is zero and backlog i ...    [in pie_calculate_probability()]
    [all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlxsw/
  spectrum_qdisc.c
     65: u64 backlog;    [member]
    264: return xstats->backlog[tclass_num] +    [in mlxsw_sp_xstats_backlog()]
    265: xstats->backlog[tclass_num + 8];    [in mlxsw_sp_xstats_backlog()]
    315: mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u64 tx_bytes, u64 tx_packets, u64 drops, u64 backlog, struct tc_qopt_offload_stats *stats_ptr)    [mlxsw_sp_qdisc_update_stats() argument]
    318: u64 drops, u64 backlog,    [in mlxsw_sp_qdisc_update_stats()]
    326: backlog -= stats_base->backlog;    [in mlxsw_sp_qdisc_update_stats()]
    330: stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);    [in mlxsw_sp_qdisc_update_stats()]
    332: stats_base->backlog += backlog;    [in mlxsw_sp_qdisc_update_stats()]
    345: u64 backlog = 0;    [mlxsw_sp_qdisc_get_tc_stats() local]
    491: u64 backlog;    [mlxsw_sp_qdisc_leaf_unoffload() local]
    968: int tclass, i, band, backlog;    [__mlxsw_sp_qdisc_ets_replace() local]
   1046: u64 backlog;    [__mlxsw_sp_qdisc_ets_unoffload() local]
   1073: u64 backlog = 0;    [mlxsw_sp_qdisc_get_prio_stats() local]
    [all...]
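The spectrum_qdisc.c hits show how an offloading driver reports backlog: hardware counters are absolute and denominated in device cells, so mlxsw_sp_qdisc_update_stats() subtracts a stored stats base, converts cells to bytes, and rolls the base forward (lines 326-332). A sketch of that delta scheme; CELL_SIZE is an assumed constant here, where the driver queries the real cell size from the device:

    #include <stdint.h>
    #include <stdio.h>

    #define CELL_SIZE 96   /* assumption; mlxsw reads this from hardware */

    struct stats_base { uint64_t backlog; };   /* last value accounted */
    struct qstats { uint64_t backlog; };       /* what tc gets to see */

    /* Mirrors the update at lines 326-332: subtract the stored base from
     * the absolute hardware reading, convert cells to bytes, roll base. */
    static void update_backlog(struct stats_base *base, struct qstats *qstats,
                               uint64_t hw_backlog_cells)
    {
        uint64_t backlog = hw_backlog_cells - base->backlog;

        qstats->backlog += backlog * CELL_SIZE;  /* cells -> bytes */
        base->backlog += backlog;
    }

    int main(void)
    {
        struct stats_base base = { 0 };
        struct qstats qstats = { 0 };

        update_backlog(&base, &qstats, 10);  /* 10 cells queued */
        update_backlog(&base, &qstats, 25);  /* 15 more since last read */
        printf("backlog: %llu bytes\n",
               (unsigned long long)qstats.backlog);  /* 2400 */
        return 0;
    }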
/kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/mlxsw/
  sch_red_core.sh
      8: # to the backlog. Any extra packets sent should almost 1:1 go to backlog. That
      9: # is what H2 is used for--it sends the extra traffic to create backlog.
    315: qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .backlog
    360: # This sends traffic in an attempt to build a backlog of $size. Returns 0 on
    362: # backlog size to stdout.
    418: local backlog
    421: # Build the below-the-limit backlog using UDP. We could use TCP just
    426: backlog=$(build_backlog $vlan $((2 * limit / 3)) udp)
    427: check_err $? "Could not build the requested backlog"
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlxsw/
  spectrum_qdisc.c
     79: u64 backlog;    [member]
    200: tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;    [in mlxsw_sp_qdisc_reduce_parent_backlog()]
    503: return xstats->backlog[tclass_num] +    [in mlxsw_sp_xstats_backlog()]
    504: xstats->backlog[tclass_num + 8];    [in mlxsw_sp_xstats_backlog()]
    558: mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u64 tx_bytes, u64 tx_packets, u64 drops, u64 backlog, struct tc_qopt_offload_stats *stats_ptr)    [mlxsw_sp_qdisc_update_stats() argument]
    561: u64 drops, u64 backlog,    [in mlxsw_sp_qdisc_update_stats()]
    569: backlog -= stats_base->backlog;    [in mlxsw_sp_qdisc_update_stats()]
    573: stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);    [in mlxsw_sp_qdisc_update_stats()]
    588: u64 backlog = 0;    [mlxsw_sp_qdisc_get_tc_stats() local]
    751: u64 backlog;    [mlxsw_sp_qdisc_leaf_unoffload() local]
   1298: u64 backlog;    [mlxsw_sp_qdisc_walk_cb_clean_stats() local]
   1422: u64 backlog;    [__mlxsw_sp_qdisc_ets_unoffload() local]
   1448: u64 backlog = 0;    [mlxsw_sp_qdisc_get_prio_stats() local]
    [all...]
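New in the 6.6 version relative to 5.10 is mlxsw_sp_qdisc_reduce_parent_backlog() (the line-200 hit): when a child qdisc is removed, its backlog base is subtracted from every ancestor's base so the ancestors' future counter deltas stay consistent. A sketch of that upward walk over an assumed parent-linked qdisc struct:

    #include <stdint.h>
    #include <stdio.h>

    struct qdisc {
        struct qdisc *parent;
        uint64_t backlog_base;   /* stats_base.backlog in the driver */
    };

    /* Walk up from the departing child and deduct its base everywhere,
     * as at spectrum_qdisc.c:200. */
    static void reduce_parent_backlog(struct qdisc *child)
    {
        struct qdisc *p;

        for (p = child->parent; p; p = p->parent)
            p->backlog_base -= child->backlog_base;
    }

    int main(void)
    {
        struct qdisc root = { .backlog_base = 100 };
        struct qdisc prio = { .parent = &root, .backlog_base = 40 };
        struct qdisc red  = { .parent = &prio, .backlog_base = 15 };

        reduce_parent_backlog(&red);
        printf("root=%llu prio=%llu\n",
               (unsigned long long)root.backlog_base,
               (unsigned long long)prio.backlog_base);   /* 85 25 */
        return 0;
    }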
/kernel/linux/linux-6.6/include/net/
  codel_impl.h
    104: codel_should_drop(const struct sk_buff *skb, void *ctx, struct codel_vars *vars, struct codel_params *params, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, u32 *backlog, codel_time_t now)    [codel_should_drop() argument]
    111: u32 *backlog,    [in codel_should_drop()]
    129: *backlog <= params->mtu) {    [in codel_should_drop()]
    146: codel_dequeue(void *ctx, u32 *backlog, struct codel_params *params, struct codel_vars *vars, struct codel_stats *stats, codel_skb_len_t skb_len_func, codel_skb_time_t skb_time_func, codel_skb_drop_t drop_func, codel_skb_dequeue_t dequeue_func)    [codel_dequeue() argument]
    147: u32 *backlog,    [in codel_dequeue()]
    166: skb_len_func, skb_time_func, backlog, now);    [in codel_dequeue()]
    176:  * A large backlog might result in drop rates so high    [in codel_dequeue()]
    202: backlog, now)) {    [in codel_dequeue()]
    227: skb_time_func, backlog, now);    [in codel_dequeue()]
  fq_impl.h
     24: flow->backlog -= bytes;    [in __fq_adjust_removal()]
     25: fq->backlog -= packets;    [in __fq_adjust_removal()]
     28: if (flow->backlog)    [in __fq_adjust_removal()]
    176: cur_len = cur->backlog;    [in fq_find_fattest_flow()]
    185: unsigned int cur_len = tin->default_flow.backlog;    [in fq_find_fattest_flow()]
    210: if (!flow->backlog) {    [in fq_tin_enqueue()]
    220: flow->backlog += skb->len;    [in fq_tin_enqueue()]
    224: fq->backlog++;    [in fq_tin_enqueue()]
    235: while (fq->backlog > fq->limit || oom) {    [in fq_tin_enqueue()]
    308: WARN_ON_ONCE(flow->backlog);    [in fq_flow_reset()]
    [all...]
/kernel/linux/linux-5.10/drivers/crypto/qce/
  core.c
     80: struct crypto_async_request *async_req, *backlog;    [qce_handle_queue() local]
     95: backlog = crypto_get_backlog(&qce->queue);    [in qce_handle_queue()]
    105: if (backlog) {    [in qce_handle_queue()]
    107: backlog->complete(backlog, -EINPROGRESS);    [in qce_handle_queue()]
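The qce entry shows the consumer side of the generic crypto queue backlog: after dequeuing, crypto_get_backlog() yields the request now at the head of the backlog, and its completion callback is invoked once with -EINPROGRESS to tell the submitter it has advanced into the queue proper (the final completion with the real status comes later). A stand-alone sketch with invented stand-ins for crypto_async_request:

    #include <errno.h>
    #include <stdio.h>

    struct async_req {
        int id;
        void (*complete)(struct async_req *req, int err);
    };

    static void req_complete(struct async_req *req, int err)
    {
        if (err == -EINPROGRESS)
            printf("req %d: left the backlog, now in progress\n", req->id);
        else
            printf("req %d: done, err=%d\n", req->id, err);
    }

    /* As in qce_handle_queue(): notify the advanced backlog request
     * first, then process the dequeued request (body omitted). */
    static void handle_queue(struct async_req *req, struct async_req *backlog)
    {
        if (backlog)
            backlog->complete(backlog, -EINPROGRESS);
        if (req)
            req->complete(req, 0);
    }

    int main(void)
    {
        struct async_req a = { 1, req_complete }, b = { 2, req_complete };

        handle_queue(&a, &b);
        return 0;
    }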
/kernel/linux/linux-5.10/net/core/
  gen_stats.c
    295: qstats->backlog += qcpu->backlog;    [in __gnet_stats_copy_queue_cpu()]
    311: qstats->backlog = q->backlog;    [in __gnet_stats_copy_queue()]
    347: d->tc_stats.backlog = qstats.backlog;    [in gnet_stats_copy_queue()]
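gen_stats.c is the reporting end: queue statistics may live in per-CPU copies, and __gnet_stats_copy_queue_cpu() folds each CPU's backlog into the single struct handed to userspace. A simplified sketch of that summation, with a fixed CPU count and no RCU:

    #include <stdio.h>

    #define NR_CPUS 4

    struct qstats { unsigned int backlog; unsigned int qlen; };

    /* Like __gnet_stats_copy_queue_cpu(): accumulate every CPU's queue
     * stats into one struct before reporting (gen_stats.c:295). */
    static void copy_queue_cpu(struct qstats *sum,
                               const struct qstats per_cpu[NR_CPUS])
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            sum->backlog += per_cpu[cpu].backlog;
            sum->qlen    += per_cpu[cpu].qlen;
        }
    }

    int main(void)
    {
        struct qstats per_cpu[NR_CPUS] = {
            { 1500, 1 }, { 3000, 2 }, { 0, 0 }, { 60, 1 },
        };
        struct qstats sum = { 0, 0 };

        copy_queue_cpu(&sum, per_cpu);
        printf("backlog=%u qlen=%u\n", sum.backlog, sum.qlen);  /* 4560 4 */
        return 0;
    }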