/kernel/linux/linux-5.10/sound/core/seq/oss/
  seq_oss_readq.c
     48  q->qlen = 0;                                  in snd_seq_oss_readq_new()
     76  if (q->qlen) {                                in snd_seq_oss_readq_clear()
     77  q->qlen = 0;                                  in snd_seq_oss_readq_clear()
    146  if (q->qlen >= q->maxlen - 1) {               in snd_seq_oss_readq_put_event()
    153  q->qlen++;                                    in snd_seq_oss_readq_put_event()
    171  if (q->qlen == 0)                             in snd_seq_oss_readq_pick()
    184  (q->qlen > 0 || q->head == q->tail),          in snd_seq_oss_readq_wait()
    195  if (q->qlen > 0) {                            in snd_seq_oss_readq_free()
    197  q->qlen--;                                    in snd_seq_oss_readq_free()
    209  return q->qlen;                               in snd_seq_oss_readq_poll()
    [all...]

/kernel/linux/linux-6.6/sound/core/seq/oss/
  seq_oss_readq.c
     48  q->qlen = 0;                                  in snd_seq_oss_readq_new()
     76  if (q->qlen) {                                in snd_seq_oss_readq_clear()
     77  q->qlen = 0;                                  in snd_seq_oss_readq_clear()
    146  if (q->qlen >= q->maxlen - 1) {               in snd_seq_oss_readq_put_event()
    153  q->qlen++;                                    in snd_seq_oss_readq_put_event()
    171  if (q->qlen == 0)                             in snd_seq_oss_readq_pick()
    184  (q->qlen > 0 || q->head == q->tail),          in snd_seq_oss_readq_wait()
    195  if (q->qlen > 0) {                            in snd_seq_oss_readq_free()
    197  q->qlen--;                                    in snd_seq_oss_readq_free()
    209  return q->qlen;                               in snd_seq_oss_readq_poll()
    [all...]
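The seq_oss_readq.c hits above trace one simple pattern: qlen counts the events held in a bounded read queue and gates put, pick, free, and poll. The following plain-C sketch (invented names, not the ALSA code itself) shows the same counter driving a small ring buffer.

/*
 * Illustrative sketch only (plain C, not the kernel source): a bounded
 * FIFO whose fill level is tracked in a qlen counter, mirroring the
 * put/pick/free/poll pattern listed above. All names are invented.
 */
struct ev_queue {
    int *buf;        /* ring buffer storage, maxlen entries */
    int maxlen;      /* capacity */
    int head, tail;  /* read and write positions */
    int qlen;        /* number of queued elements */
};

static int evq_put(struct ev_queue *q, int ev)
{
    if (q->qlen >= q->maxlen - 1)       /* keep one slot free, as above */
        return -1;                      /* queue full */
    q->buf[q->tail] = ev;
    q->tail = (q->tail + 1) % q->maxlen;
    q->qlen++;
    return 0;
}

static int evq_pick(struct ev_queue *q, int *ev)
{
    if (q->qlen == 0)
        return -1;                      /* nothing to read */
    *ev = q->buf[q->head];              /* peek without consuming */
    return 0;
}

static void evq_free(struct ev_queue *q)
{
    if (q->qlen > 0) {                  /* consume the element just read */
        q->head = (q->head + 1) % q->maxlen;
        q->qlen--;
    }
}

static int evq_poll(const struct ev_queue *q)
{
    return q->qlen;                     /* non-zero means data is readable */
}

A reader first calls evq_pick() to look at the head element and then evq_free() to consume it, matching the pick/free split visible in the listing.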
/kernel/linux/linux-5.10/net/sched/
  sch_sfq.c
    103  sfq_index qlen;     /* number of skbs in skblist */    (member)
    207  int qlen = slot->qlen;                        in sfq_link()  (local)
    209  p = qlen + SFQ_MAX_FLOWS;                     in sfq_link()
    210  n = q->dep[qlen].next;                        in sfq_link()
    215  q->dep[qlen].next = x;  /* sfq_dep_head(q, p)->next = x */   in sfq_link()
    235  d = q->slots[x].qlen--;                       in sfq_dec()
    248  d = ++q->slots[x].qlen;                       in sfq_inc()
    310  sch->q.qlen--;                                in sfq_drop()
    350  sfq_index x, qlen;                            in sfq_enqueue()  (local)
    628  unsigned int qlen, dropped = 0;               sfq_change()  (local)
    [all...]
  sch_sfb.c
     40  u16 qlen;           /* length of virtual queue */      (member)
    132  if (b[hash].qlen < 0xFFFF)                    in increment_one_qlen()
    133  b[hash].qlen++;                               in increment_one_qlen()
    161  if (b[hash].qlen > 0)                         in decrement_one_qlen()
    162  b[hash].qlen--;                               in decrement_one_qlen()
    196  * compute max qlen, max p_mark, and avg p_mark
    201  u32 qlen = 0, prob = 0, totalpm = 0;          in sfb_compute_qlen()  (local)
    205  if (qlen < b->qlen)                           in sfb_compute_qlen()
    206  qlen                                          in sfb_compute_qlen()
    [all...]
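The sch_sfb.c hits show a pair of saturating helpers around a 16-bit virtual queue length: increments stop at 0xFFFF, decrements stop at 0, and sfb_compute_qlen() reports the maximum over all buckets. A plain-C sketch of that pattern (bucket layout invented):

/*
 * Illustrative sketch (plain C): saturating per-bucket counters like the
 * ones in the sch_sfb.c hits above. The 16-bit virtual queue length can
 * never wrap around in either direction.
 */
#include <stdint.h>

struct bucket {
    uint16_t qlen;   /* length of the virtual queue */
};

static void increment_one_qlen(struct bucket *b)
{
    if (b->qlen < 0xFFFF)
        b->qlen++;
}

static void decrement_one_qlen(struct bucket *b)
{
    if (b->qlen > 0)
        b->qlen--;
}

/* The maximum over all buckets is what sfb_compute_qlen()-style code reports. */
static uint16_t max_qlen(const struct bucket *b, int n)
{
    uint16_t qlen = 0;

    for (int i = 0; i < n; i++)
        if (qlen < b[i].qlen)
            qlen = b[i].qlen;
    return qlen;
}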
  sch_mq.c
    156  __u32 qlen = 0;                               in mq_dump()  (local)
    158  sch->q.qlen = 0;                              in mq_dump()
    172  qlen = qdisc_qlen_sum(qdisc);                 in mq_dump()
    178  &qdisc->qstats, qlen);                        in mq_dump()
    179  sch->q.qlen += qlen;                          in mq_dump()
    181  sch->q.qlen += qdisc->q.qlen;                 in mq_dump()
    184  sch->qstats.qlen += qdisc->qstats.qlen;       in mq_dump()
    [all...]
  sch_mqprio.c
    456  sch->q.qlen = 0;                              in mqprio_dump()
    470  __u32 qlen = qdisc_qlen_sum(qdisc);           in mqprio_dump()  (local)
    477  &qdisc->qstats, qlen);                        in mqprio_dump()
    478  sch->q.qlen += qlen;                          in mqprio_dump()
    480  sch->q.qlen += qdisc->q.qlen;                 in mqprio_dump()
    579  __u32 qlen = 0;                               (variable)
    600  qlen = qdisc_qlen_sum(qdisc);
    608  qlen);
    [all...]
  sch_codel.c
     98  /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,   in codel_qdisc_dequeue()
    101  if (q->stats.drop_count && sch->q.qlen) {     in codel_qdisc_dequeue()
    138  unsigned int qlen, dropped = 0;               in codel_change()  (local)
    175  qlen = sch->q.qlen;                           in codel_change()
    176  while (sch->q.qlen > sch->limit) {            in codel_change()
    183  qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);   in codel_change()
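The codel_change() hits illustrate the common qdisc resize pattern: remember q.qlen before trimming, drop packets until the queue fits the new limit, then hand the packet and byte deltas to qdisc_tree_reduce_backlog() so parent qdiscs stay consistent. A schematic kernel-style sketch of that sequence (paraphrased, not a verbatim copy of sch_codel.c):

/*
 * Schematic sketch, assuming the usual net/sched qdisc context
 * (<net/sch_generic.h>): trim a queue after its limit was lowered and
 * report what disappeared to the ancestors.
 */
static void trim_to_new_limit(struct Qdisc *sch)
{
    unsigned int qlen, dropped = 0;

    qlen = sch->q.qlen;                     /* length before trimming */
    while (sch->q.qlen > sch->limit) {
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        dropped += qdisc_pkt_len(skb);      /* bytes removed */
        qdisc_qstats_backlog_dec(sch, skb);
        rtnl_qdisc_drop(skb, sch);
    }
    /* tell parent qdiscs how many packets and bytes went away */
    qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
}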
/kernel/linux/linux-6.6/net/sched/
  sch_sfq.c
    103  sfq_index qlen;     /* number of skbs in skblist */    (member)
    207  int qlen = slot->qlen;                        in sfq_link()  (local)
    209  p = qlen + SFQ_MAX_FLOWS;                     in sfq_link()
    210  n = q->dep[qlen].next;                        in sfq_link()
    215  q->dep[qlen].next = x;  /* sfq_dep_head(q, p)->next = x */   in sfq_link()
    235  d = q->slots[x].qlen--;                       in sfq_dec()
    248  d = ++q->slots[x].qlen;                       in sfq_inc()
    310  sch->q.qlen--;                                in sfq_drop()
    350  sfq_index x, qlen;                            in sfq_enqueue()  (local)
    631  unsigned int qlen, dropped = 0;               sfq_change()  (local)
    [all...]
  sch_sfb.c
     40  u16 qlen;           /* length of virtual queue */      (member)
    132  if (b[hash].qlen < 0xFFFF)                    in increment_one_qlen()
    133  b[hash].qlen++;                               in increment_one_qlen()
    161  if (b[hash].qlen > 0)                         in decrement_one_qlen()
    162  b[hash].qlen--;                               in decrement_one_qlen()
    196  * compute max qlen, max p_mark, and avg p_mark
    201  u32 qlen = 0, prob = 0, totalpm = 0;          in sfb_compute_qlen()  (local)
    205  if (qlen < b->qlen)                           in sfb_compute_qlen()
    206  qlen                                          in sfb_compute_qlen()
    [all...]
/kernel/linux/linux-5.10/include/trace/events/
  rcu.h
    485  TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
    487  TP_ARGS(rcuname, rhp, qlen),
    493  __field(long, qlen)
    500  __entry->qlen = qlen;
    505  __entry->qlen)
    519  long qlen),
    521  TP_ARGS(rcuname, rhp, offset, qlen),
    527  __field(long, qlen)
    534  __entry->qlen
    [all...]
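In rcu.h the qlen hits are all parts of TRACE_EVENT() definitions: the value enters through TP_PROTO()/TP_ARGS(), is stored via __field() and TP_fast_assign(), and is printed by TP_printk(). The hypothetical event below (invented name; the usual TRACE_SYSTEM and define_trace.h scaffolding of a real events header is omitted) shows that flow end to end:

/*
 * Hypothetical trace event, not taken from rcu.h: only the macro
 * structure mirrors the hits above. qlen travels from the tracepoint
 * arguments into the recorded entry and then into the text output.
 */
TRACE_EVENT(demo_callback_queued,

    TP_PROTO(const char *name, void *obj, long qlen),

    TP_ARGS(name, obj, qlen),

    TP_STRUCT__entry(
        __field(const char *, name)
        __field(void *, obj)
        __field(long, qlen)        /* queue length at enqueue time */
    ),

    TP_fast_assign(
        __entry->name = name;
        __entry->obj = obj;
        __entry->qlen = qlen;
    ),

    TP_printk("%s obj=%p qlen=%ld",
              __entry->name, __entry->obj, __entry->qlen)
);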
/kernel/linux/linux-5.10/sound/core/seq/
  seq_midi_event.c
     50  int qlen;                                     (member)
    141  dev->qlen = 0;                                in reset_encode()
    200  dev->qlen = status_event[dev->type].qlen;     in snd_midi_event_encode_byte()
    202  if (dev->qlen > 0) {                          in snd_midi_event_encode_byte()
    206  dev->qlen--;                                  in snd_midi_event_encode_byte()
    210  dev->qlen = status_event[dev->type].qlen - 1;   in snd_midi_event_encode_byte()
    214  if (dev->qlen == 0) {                         in snd_midi_event_encode_byte()
    320  int qlen;                                     in snd_midi_event_decode()  (local)
    [all...]

/kernel/linux/linux-6.6/sound/core/seq/
  seq_midi_event.c
     50  int qlen;                                     (member)
    141  dev->qlen = 0;                                in reset_encode()
    200  dev->qlen = status_event[dev->type].qlen;     in snd_midi_event_encode_byte()
    202  if (dev->qlen > 0) {                          in snd_midi_event_encode_byte()
    206  dev->qlen--;                                  in snd_midi_event_encode_byte()
    210  dev->qlen = status_event[dev->type].qlen - 1;   in snd_midi_event_encode_byte()
    214  if (dev->qlen == 0) {                         in snd_midi_event_encode_byte()
    320  int qlen;                                     in snd_midi_event_decode()  (local)
    [all...]
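In seq_midi_event.c, qlen is a countdown of the data bytes still expected for the current MIDI message: a status byte loads it from a lookup table, each data byte decrements it, and reaching zero completes the event; running status reloads it when a data byte arrives with no pending count. A plain-C sketch of that state machine (names and table invented):

/*
 * Illustrative sketch (plain C, invented names): a byte-at-a-time MIDI
 * parser whose qlen counts down the data bytes still expected.
 * Returns true when a complete channel message has been seen.
 */
#include <stdbool.h>

struct midi_state {
    unsigned char status;  /* last status byte seen */
    int qlen;              /* data bytes still expected */
};

/* data bytes expected after each status nibble 0x8..0xE */
static const int data_len[7] = { 2, 2, 2, 2, 1, 1, 2 };

static bool midi_feed_byte(struct midi_state *s, unsigned char c)
{
    if (c >= 0x80 && c < 0xf0) {            /* new channel status byte */
        s->status = c;
        s->qlen = data_len[(c >> 4) - 8];
        return false;
    }
    if (c < 0x80 && s->status) {            /* data byte */
        if (s->qlen == 0)                   /* running status: restart */
            s->qlen = data_len[(s->status >> 4) - 8];
        s->qlen--;
        return s->qlen == 0;                /* message complete? */
    }
    return false;                           /* system bytes ignored here */
}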
/kernel/linux/linux-5.10/include/net/
  sch_generic.h
     54  __u32 qlen;                                   (member)
    157  return !READ_ONCE(qdisc->q.qlen);             in qdisc_is_empty()
    509  return this_cpu_ptr(q->cpu_qstats)->qlen;
    514  return q->q.qlen;                             in qdisc_qlen()
    519  __u32 qlen = q->qstats.qlen;                  in qdisc_qlen_sum()  (local)
    524  qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;  in qdisc_qlen_sum()
    526  qlen += q->q.qlen;                            in qdisc_qlen_sum()
    951  __u32 qlen = qdisc_qlen_sum(sch);             qdisc_qstats_copy()  (local)
    956  qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, __u32 *backlog)   qdisc_qstats_qlen_backlog()  (argument)
    969  __u32 qlen, backlog;                          qdisc_tree_flush_backlog()  (local)
    977  __u32 qlen, backlog;                          qdisc_purge_queue()  (local)
    [all...]
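The qdisc_qlen_sum() hits show how a queue length is totalled when a qdisc keeps per-CPU statistics: start from the base qstats.qlen, add every CPU's counter, and otherwise fall back to the qdisc's own q.qlen. A schematic sketch of that aggregation (renamed, assuming the <net/sch_generic.h> context, not the header's exact code):

/*
 * Schematic sketch of the aggregation pattern listed above: per-CPU
 * counters are summed when present, otherwise q.qlen is authoritative.
 */
static inline __u32 demo_qlen_sum(const struct Qdisc *q)
{
    __u32 qlen = q->qstats.qlen;
    int i;

    if (qdisc_is_percpu_stats(q)) {
        for_each_possible_cpu(i)
            qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
    } else {
        qlen += q->q.qlen;
    }
    return qlen;
}

This is the value that mq_dump()- and mqprio_dump()-style callers (see the sch_mq.c and sch_mqprio.c hits earlier) accumulate into the parent's statistics.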
  request_sock.h
    140  * qlen - pending TFO requests (still in TCP_SYN_RECV).
    160  int qlen;           /* # of pending (TCP_SYN_RECV) reqs */   (member)
    178  atomic_t qlen;                                (member)
    220  atomic_dec(&queue->qlen);                     in reqsk_queue_removed()
    226  atomic_inc(&queue->qlen);                     in reqsk_queue_added()
    231  return atomic_read(&queue->qlen);             in reqsk_queue_len()
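request_sock.h keeps the listener's pending-request count in an atomic_t so it can be bumped and read without taking the queue lock. A minimal sketch of that accounting, with shortened, invented names:

/*
 * Minimal sketch of the atomic accounting pattern in the request_sock.h
 * hits (kernel-style, but not the real structures).
 */
#include <linux/atomic.h>

struct demo_req_queue {
    atomic_t qlen;   /* # of pending (TCP_SYN_RECV) requests */
};

static inline void demo_queue_added(struct demo_req_queue *queue)
{
    atomic_inc(&queue->qlen);
}

static inline void demo_queue_removed(struct demo_req_queue *queue)
{
    atomic_dec(&queue->qlen);
}

static inline int demo_queue_len(const struct demo_req_queue *queue)
{
    return atomic_read(&queue->qlen);
}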
/kernel/linux/linux-6.6/include/trace/events/
  rcu.h
    514  TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
    516  TP_ARGS(rcuname, rhp, qlen),
    522  __field(long, qlen)
    529  __entry->qlen = qlen;
    534  __entry->qlen)
    574  long qlen),
    576  TP_ARGS(rcuname, rhp, offset, qlen),
    582  __field(long, qlen)
    589  __entry->qlen
    [all...]
/kernel/linux/linux-5.10/drivers/md/
  dm-queue-length.c
     39  atomic_t qlen;      /* the number of in-flight I/Os */   (member)
    100  DMEMIT("%d ", atomic_read(&pi->qlen));        in ql_status()
    149  atomic_set(&pi->qlen, 0);                     in ql_add_path()
    200  (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))   in ql_select_path()
    203  if (!atomic_read(&best->qlen))                in ql_select_path()
    224  atomic_inc(&pi->qlen);                        in ql_start_io()
    234  atomic_dec(&pi->qlen);                        in ql_end_io()
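dm-queue-length.c implements the "queue-length" path selector: every path tracks its in-flight I/Os in an atomic counter, ql_select_path() picks the path with the fewest, and ql_start_io()/ql_end_io() adjust the count. A plain-C sketch of the policy (structures invented, C11 atomics standing in for kernel atomics):

/*
 * Illustrative sketch in plain C, not the device-mapper code: pick the
 * path with the fewest in-flight I/Os; bump on start, drop on completion.
 */
#include <stdatomic.h>
#include <stddef.h>

struct path_info {
    atomic_int qlen;   /* number of in-flight I/Os on this path */
};

static struct path_info *select_path(struct path_info *paths, int n)
{
    struct path_info *best = NULL;
    int i;

    for (i = 0; i < n; i++) {
        if (!best ||
            atomic_load(&paths[i].qlen) < atomic_load(&best->qlen))
            best = &paths[i];

        if (atomic_load(&best->qlen) == 0)
            break;                     /* an idle path cannot be beaten */
    }
    return best;
}

static void start_io(struct path_info *pi) { atomic_fetch_add(&pi->qlen, 1); }
static void end_io(struct path_info *pi)   { atomic_fetch_sub(&pi->qlen, 1); }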
/kernel/linux/linux-6.6/drivers/md/
  dm-ps-queue-length.c
     40  atomic_t qlen;      /* the number of in-flight I/Os */   (member)
    101  DMEMIT("%d ", atomic_read(&pi->qlen));        in ql_status()
    153  atomic_set(&pi->qlen, 0);                     in ql_add_path()
    204  (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))   in ql_select_path()
    207  if (!atomic_read(&best->qlen))                in ql_select_path()
    228  atomic_inc(&pi->qlen);                        in ql_start_io()
    238  atomic_dec(&pi->qlen);                        in ql_end_io()
/kernel/linux/linux-6.6/include/net/
  sch_generic.h
     68  __u32 qlen;                                   (member)
    185  return !READ_ONCE(qdisc->q.qlen);             in qdisc_is_empty()
    517  return q->q.qlen;
    522  __u32 qlen = q->qstats.qlen;                  in qdisc_qlen_sum()  (local)
    527  qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;  in qdisc_qlen_sum()
    529  qlen += q->q.qlen;                            in qdisc_qlen_sum()
    532  return qlen;                                  in qdisc_qlen_sum()
    951  __u32 qlen = qdisc_qlen_sum(sch);             qdisc_qstats_copy()  (local)
    956  qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, __u32 *backlog)   qdisc_qstats_qlen_backlog()  (argument)
    968  __u32 qlen, backlog;                          qdisc_tree_flush_backlog()  (local)
    976  __u32 qlen, backlog;                          qdisc_purge_queue()  (local)
    [all...]
/kernel/linux/linux-5.10/net/core/
  gen_stats.c
    294  qstats->qlen = 0;                             in __gnet_stats_copy_queue_cpu()
    305  __u32 qlen)
    310  qstats->qlen = q->qlen;                       in __gnet_stats_copy_queue()
    317  qstats->qlen = qlen;                          in __gnet_stats_copy_queue()
    326  * @qlen: queue length statistics
    338  struct gnet_stats_queue *q, __u32 qlen)       in gnet_stats_copy_queue()
    342  __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);   in gnet_stats_copy_queue()
    346  d->tc_stats.qlen                              in gnet_stats_copy_queue()
    302  __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, const struct gnet_stats_queue __percpu *cpu, const struct gnet_stats_queue *q, __u32 qlen)   __gnet_stats_copy_queue()  (argument)
    336  gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue __percpu *cpu_q, struct gnet_stats_queue *q, __u32 qlen)   gnet_stats_copy_queue()  (argument)
    [all...]
  request_sock.c
     38  queue->fastopenq.qlen = 0;                    in reqsk_queue_alloc()
     72  * The lock also protects other fields such as fastopenq->qlen, which is
     98  fastopenq->qlen--;                            in reqsk_fastopen_remove()
    114  * TFO when the qlen exceeds max_qlen.         in reqsk_fastopen_remove()
    126  fastopenq->qlen++;                            in reqsk_fastopen_remove()
/kernel/linux/linux-5.10/drivers/usb/gadget/legacy/
  gmidi.c
     52  static unsigned int qlen = 32;                (variable)
     53  module_param(qlen, uint, S_IRUGO);
     54  MODULE_PARM_DESC(qlen, "USB read and write request queue length");
    156  midi_opts->qlen = qlen;                       in midi_bind()
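gmidi.c exposes qlen as a module parameter: a static default of 32, made world-readable with module_param(..., S_IRUGO) and documented with MODULE_PARM_DESC(). A schematic sketch of the same knob in a hypothetical module (0444 spelled out, which is what S_IRUGO expands to):

/*
 * Schematic sketch of the module-parameter pattern above, in an invented
 * module: a read-only qlen knob with a built-in default of 32.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int qlen = 32;
module_param(qlen, uint, 0444);   /* 0444 == S_IRUGO: world-readable in sysfs */
MODULE_PARM_DESC(qlen, "request queue length (default 32)");

Loading such a module with qlen=64 on the insmod or modprobe command line would override the default before the driver binds.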
/kernel/linux/linux-6.6/drivers/usb/gadget/legacy/
  gmidi.c
     52  static unsigned int qlen = 32;                (variable)
     53  module_param(qlen, uint, S_IRUGO);
     54  MODULE_PARM_DESC(qlen, "USB read and write request queue length");
    156  midi_opts->qlen = qlen;                       in midi_bind()
/kernel/linux/linux-6.6/net/core/
  request_sock.c
     38  queue->fastopenq.qlen = 0;                    in reqsk_queue_alloc()
     72  * The lock also protects other fields such as fastopenq->qlen, which is
     98  fastopenq->qlen--;                            in reqsk_fastopen_remove()
    114  * TFO when the qlen exceeds max_qlen.         in reqsk_fastopen_remove()
    126  fastopenq->qlen++;                            in reqsk_fastopen_remove()
/kernel/linux/linux-5.10/drivers/usb/gadget/function/
  f_loopback.c
     34  unsigned qlen;                                (member)
    280  * of requests it submitted (just maintains qlen count), we   in loopback_complete()
    319  * we buffer at most 'qlen' transfers; We allocate buffers only   in alloc_requests()
    323  for (i = 0; i < loop->qlen && result == 0; i++) {   in alloc_requests()
    441  loop->qlen = lb_opts->qlen;                   in loopback_alloc()
    442  if (!loop->qlen)                              in loopback_alloc()
    443  loop->qlen = 32;                              in loopback_alloc()
    479  result = sprintf(page, "%d\n", opts->qlen);   in f_lb_opts_qlen_show()
    502  opts->qlen                                    in f_lb_opts_qlen_store()
    [all...]
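In f_loopback.c, qlen is a preallocation budget: loopback_alloc() copies it from the function options, falls back to 32 when it is unset, and alloc_requests() then buffers at most qlen transfers. A plain-C sketch of that shape (request type and names invented):

/*
 * Illustrative sketch in plain C, not the gadget code: allocate up to
 * qlen request buffers, stopping at the first failure, with a fallback
 * default when the configured value is zero.
 */
#include <stdlib.h>

struct demo_req { char *buf; };

static int alloc_requests(struct demo_req *reqs, unsigned qlen, size_t buflen)
{
    int result = 0;
    unsigned i;

    /* buffer at most 'qlen' transfers; stop on the first failure */
    for (i = 0; i < qlen && result == 0; i++) {
        reqs[i].buf = malloc(buflen);
        if (!reqs[i].buf)
            result = -1;
    }
    return result;
}

static unsigned effective_qlen(unsigned configured)
{
    if (!configured)
        return 32;      /* same fallback as the hits above */
    return configured;
}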
/kernel/linux/linux-6.6/drivers/usb/gadget/function/
  f_loopback.c
     34  unsigned qlen;                                (member)
    278  * of requests it submitted (just maintains qlen count), we   in loopback_complete()
    317  * we buffer at most 'qlen' transfers; We allocate buffers only   in alloc_requests()
    321  for (i = 0; i < loop->qlen && result == 0; i++) {   in alloc_requests()
    439  loop->qlen = lb_opts->qlen;                   in loopback_alloc()
    440  if (!loop->qlen)                              in loopback_alloc()
    441  loop->qlen = 32;                              in loopback_alloc()
    477  result = sprintf(page, "%d\n", opts->qlen);   in f_lb_opts_qlen_show()
    500  opts->qlen                                    in f_lb_opts_qlen_store()
    [all...]