1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #ifdef HAVE_CONFIG_H
21 #include <config.h>
22 #endif
23
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <string.h>
27
28 #include <pulse/xmalloc.h>
29
30 #include <pulsecore/log.h>
31 #include <pulsecore/macro.h>
32 #include <pulsecore/flist.h>
33
34 #include "memblockq.h"
35
36 /* #define MEMBLOCKQ_DEBUG */
/* Create a new memory block queue. 'idx' is the initial value for both the
 * read and the write index. 'silence' (optional) is copied and its memblock
 * referenced; it is handed out whenever a hole in the queue is read. The
 * limit parameters are sanitized by the pa_memblockq_set_*() calls below,
 * so the stored values may differ from what was requested. */
pa_memblockq* pa_memblockq_new(
        const char *name,
        int64_t idx,
        size_t maxlength,
        size_t tlength,
        const pa_sample_spec *sample_spec,
        size_t prebuf,
        size_t minreq,
        size_t maxrewind,
        pa_memchunk *silence) {

    pa_memblockq* bq;

    pa_assert(sample_spec);
    pa_assert(name);

    bq = pa_xnew0(pa_memblockq, 1);
    bq->name = pa_xstrdup(name);

    bq->sample_spec = *sample_spec;
    /* All indexes and lengths in this queue are multiples of one frame */
    bq->base = pa_frame_size(sample_spec);
    bq->read_index = bq->write_index = idx;

    pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) maxlength, (unsigned long) tlength, (unsigned long) bq->base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);

    bq->in_prebuf = true;

    /* NOTE: the order of these calls matters, since each setter clamps
     * against the metrics established by the previous ones */
    pa_memblockq_set_maxlength(bq, maxlength);
    pa_memblockq_set_tlength(bq, tlength);
    pa_memblockq_set_minreq(bq, minreq);
    pa_memblockq_set_prebuf(bq, prebuf);
    pa_memblockq_set_maxrewind(bq, maxrewind);

    pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
                 (unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    }

    /* Used by pa_memblockq_push_align() to re-align unaligned chunks */
    bq->mcalign = pa_mcalign_new(bq->base);

    return bq;
}
83
/* Free the queue and drop every reference it holds: all queued blocks,
 * the silence chunk, the frame aligner and the name string. */
void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    /* Drops all queued blocks (despite its name, see pa_memblockq_silence()) */
    pa_memblockq_silence(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq->name);
    pa_xfree(bq);
}
98
/* Re-validate the cached bq->current_read pointer after read_index or the
 * block list may have changed. On return current_read points at the block
 * containing read_index, or at the first block right of it, or is NULL if
 * everything queued has already been played. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
126
/* Re-validate the cached bq->current_write pointer after write_index or the
 * block list may have changed. Mirror image of fix_current_read(): scan
 * right first, then left. On return current_write points at or right of the
 * block the write index falls into, or is NULL if the write position lies
 * left of all queued data. */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
154
/* Return true if a chunk of 'l' bytes may be written at the current write
 * index without letting the queue grow beyond bq->maxlength. Bytes that
 * merely fill the gap behind the read index, or that overwrite data already
 * queued, do not count against the limit. */
static bool can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        /* The write pointer lags behind the read pointer: the part of the
         * chunk that only fills this gap is for free */
        int64_t d = bq->read_index - bq->write_index;

        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return true;
    }

    /* Current end of queued data; if the queue is empty the write index
     * itself is the end */
    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return false;

    return true;
}
178
/* Bookkeeping after the write index moved. If 'account' is set the movement
 * is counted against the outstanding 'requested' bytes (i.e. the writer is
 * answering one of our requests); otherwise it is charged to the 'missing'
 * counter (e.g. a plain seek). */
static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, bool account) {
    int64_t moved;

    pa_assert(bq);

    moved = bq->write_index - old_write_index;

    /* Pick the counter this movement is charged to */
    *(account ? &bq->requested : &bq->missing) -= moved;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pushed/seeked %lli: requested counter at %lli, account=%i", bq->name, (long long) moved, (long long) bq->requested, account);
#endif
}
195
/* Bookkeeping after the read index moved: whatever the reader consumed is
 * now missing from the target fill level and may be requested from the
 * writer again. */
static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t consumed;

    pa_assert(bq);

    consumed = bq->read_index - old_read_index;

    bq->missing += consumed;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] popped %lli: missing counter at %lli", bq->name, (long long) consumed, (long long) bq->missing);
#endif
}
208
/* Insert 'uchunk' at the current write index, referencing (not copying) its
 * memblock. Data already queued in the affected range is overwritten: blocks
 * fully covered are dropped, partially covered blocks are truncated or split.
 * The chunk must be frame-aligned (index and length multiples of bq->base).
 * Returns 0 on success, -1 if the queue would exceed maxlength. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    pa_assert(uchunk->length % bq->base == 0);
    pa_assert(uchunk->index % bq->base == 0);

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)
            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index) {
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        } else if (bq->write_index <= q->index &&
                   bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */
                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk; if nothing of it is left, drop it */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    /* No merge possible: allocate a fresh list entry (recycled from the
     * free list if available) and link it in after q */
    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, true);
    return 0;
}
375
pa_memblockq_prebuf_active(pa_memblockq *bq)376 bool pa_memblockq_prebuf_active(pa_memblockq *bq) {
377 pa_assert(bq);
378
379 if (bq->in_prebuf)
380 return pa_memblockq_get_length(bq) < bq->prebuf;
381 else
382 return bq->prebuf > 0 && bq->read_index >= bq->write_index;
383 }
384
update_prebuf(pa_memblockq *bq)385 static bool update_prebuf(pa_memblockq *bq) {
386 pa_assert(bq);
387
388 if (bq->in_prebuf) {
389
390 if (pa_memblockq_get_length(bq) < bq->prebuf)
391 return true;
392
393 bq->in_prebuf = false;
394 return false;
395 } else {
396
397 if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
398 bq->in_prebuf = true;
399 return true;
400 }
401
402 return false;
403 }
404 }
405
/* Return the chunk at the current read index without consuming it. Returns
 * -1 while pre-buffering or when the queue is empty and no silence chunk is
 * set. If the read index points into a hole, the configured silence chunk is
 * returned (referenced), or a chunk with memblock==NULL whose length states
 * how much silence the caller should produce itself. The caller must unref
 * chunk->memblock (when non-NULL) and advance with pa_memblockq_drop(). */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            /* Don't hand out more silence than the hole is wide */
            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    /* Skip the part of the block that lies before the read index */
    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
463
/* Like pa_memblockq_peek(), but always returns exactly 'block_size' bytes,
 * copying data from as many queued blocks as necessary into a freshly
 * allocated memblock and filling holes with the silence chunk. Requires a
 * silence memblock to be configured. Returns -1 only if the plain peek
 * fails (pre-buffering or empty queue). */
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_mempool *pool;
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    /* Fast path: the first chunk already covers the requested size */
    if (tchunk.length >= block_size) {
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    /* Allocate the result block from the same pool as the peeked data */
    pool = pa_memblock_get_pool(tchunk.memblock);
    rchunk.memblock = pa_memblock_new(pool, block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;
    pa_mempool_unref(pool), pool = NULL;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    /* rchunk.index doubles as the fill position within the result block */
    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblock_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + tchunk.length;

    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            /* Skip the part of the block that lies before 'ri' */
            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        /* Never copy past the end of the result block */
        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
536
/* Advance the read index by 'length' bytes (frame-aligned), i.e. consume
 * data previously returned by a peek. Dropping stops early if pre-buffering
 * kicks in again mid-way, so that prebuf semantics are never violated. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    /* Release blocks that fell behind the rewind window */
    drop_backlog(bq);
    read_index_changed(bq, old);
}
579
/* Move the read index backwards by 'length' bytes (frame-aligned), so
 * previously consumed data can be read again. This is kind of the inverse
 * of pa_memblockq_drop(). */
void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    int64_t previous;

    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    previous = bq->read_index;
    bq->read_index -= (int64_t) length;

    read_index_changed(bq, previous);
}
593
pa_memblockq_is_readable(pa_memblockq *bq)594 bool pa_memblockq_is_readable(pa_memblockq *bq) {
595 pa_assert(bq);
596
597 if (pa_memblockq_prebuf_active(bq))
598 return false;
599
600 if (pa_memblockq_get_length(bq) <= 0)
601 return false;
602
603 return true;
604 }
605
pa_memblockq_get_length(pa_memblockq *bq)606 size_t pa_memblockq_get_length(pa_memblockq *bq) {
607 pa_assert(bq);
608
609 if (bq->write_index <= bq->read_index)
610 return 0;
611
612 return (size_t) (bq->write_index - bq->read_index);
613 }
614
/* Move the write index according to 'seek'/'offset' without writing data.
 * PA_SEEK_RELATIVE moves relative to the current write index,
 * PA_SEEK_ABSOLUTE sets it directly, PA_SEEK_RELATIVE_ON_READ is relative
 * to the read index, and PA_SEEK_RELATIVE_END is relative to the end of the
 * queued data (or the read index when the queue is empty). 'account'
 * selects which request counter the movement is charged to (see
 * write_index_changed()). */
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, bool account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}
641
/* Drop all queued data and pull the write index back to the read index,
 * then re-arm pre-buffering. 'account' is forwarded to
 * write_index_changed() to pick the counter charged for the movement. */
void pa_memblockq_flush_write(pa_memblockq *bq, bool account) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, old, account);
}
654
/* Drop all queued data and push the read index forward to the write index,
 * then re-arm pre-buffering. Counterpart of pa_memblockq_flush_write(). */
void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, old);
}
667
/* Return the sanitized target fill level in bytes. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
673
/* Return the sanitized minimum request size in bytes. */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
679
/* Return the sanitized maximum rewind size in bytes. */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}
685
/* Return the current read index (absolute byte position). */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}
691
/* Return the current write index (absolute byte position). */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
697
/* Like pa_memblockq_push(), but accepts chunks that are not frame-aligned:
 * the chunk is fed through bq->mcalign, and the aligned pieces popped out of
 * it are pushed individually. Returns -1 (and flushes the aligner) if any
 * piece cannot be pushed. */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    /* With a frame size of 1 every chunk is trivially aligned */
    if (bq->base == 1)
        return pa_memblockq_push(bq, chunk);

    /* Check capacity up front against the total aligned size, so we don't
     * fail halfway through pushing the aligned pieces */
    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;
        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
725
/* Leave pre-buffering mode immediately, making queued data readable. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = false;
}
731
/* Re-enter pre-buffering mode (only meaningful if a prebuf watermark is
 * configured). */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = true;
}
738
/* Return the sanitized maximum queue length in bytes. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
744
/* Return the sanitized pre-buffering watermark in bytes. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
750
/* Return how many bytes should be requested from the writer now, and move
 * that amount from the 'missing' counter to the 'requested' counter.
 * Returns 0 when nothing is missing, or when the missing amount is still
 * below minreq while pre-buffering is not active (to avoid nagging the
 * writer with tiny requests). */
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pop: %lli", bq->name, (long long) bq->missing);
#endif

    if (bq->missing <= 0)
        return 0;

    if (((size_t) bq->missing < bq->minreq) &&
        !pa_memblockq_prebuf_active(bq))
        return 0;

    l = (size_t) bq->missing;

    /* Transfer the whole missing amount into the outstanding-request
     * counter; write_index_changed(..., account=true) pays it back */
    bq->requested += bq->missing;
    bq->missing = 0;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] sent %lli: request counter is at %lli", bq->name, (long long) l, (long long) bq->requested);
#endif

    return l;
}
778
/* Set the maximum queue length: rounded up to a multiple of the frame size,
 * at least one frame. Shrinks tlength (and, transitively, the other
 * metrics) if it no longer fits. */
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}
790
/* Set the target fill level: 0 or (size_t)-1 selects maxlength; rounded up
 * to a frame multiple and clamped to maxlength. Shrinks minreq and prebuf
 * if they no longer fit, and adjusts the 'missing' counter by the change so
 * the writer is asked for the right amount. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    /* prebuf may be at most tlength+base-minreq (see set_prebuf()) */
    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
812
/* Set the minimum request size: rounded down to a frame multiple, clamped
 * to [base, tlength]. Shrinks prebuf if the new minreq leaves it too big. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
}
827
/* Set the pre-buffering watermark: (size_t)-1 selects the default
 * tlength+base-minreq; rounded up to a frame multiple (at least one frame
 * when non-zero) and clamped to tlength+base-minreq. Leaves prebuf mode if
 * the queue already satisfies the new watermark. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = false;
}
845
/* Set the maximum rewind size, rounded down to a frame multiple. */
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}
851
/* Apply a pa_buffer_attr to the queue. Note the call order: each setter
 * clamps against the metrics established by the previous ones. maxrewind is
 * not part of pa_buffer_attr and is left unchanged. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_minreq(bq, a->minreq);
    pa_memblockq_set_prebuf(bq, a->prebuf);
}
861
/* Export the queue's current (sanitized) metrics into a pa_buffer_attr. */
void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
}
871
/* Move all data from 'source' into 'bq', draining 'source' as we go. Holes
 * in the source (chunks with a NULL memblock) become seeks in the target.
 * Disables pre-buffering on the target first. Returns 0 when the source is
 * exhausted, -1 if a push into the target fails. */
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        /* Source is empty: we're done */
        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            /* A hole in the source: just advance the target's write index */
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, true);

        /* NOTE(review): this drops from 'bq', not 'source' — mirrors the
         * upstream code; verify intent before changing */
        pa_memblockq_drop(bq, chunk.length);
    }
}
901
/* Hint that all data from the read index onwards will be needed soon, by
 * calling pa_memchunk_will_need() on every remaining block. */
void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}
912
/* Replace the silence chunk handed out for holes in the queue. A NULL
 * 'silence' clears it. The old chunk's memblock is unreferenced, the new
 * one is copied and referenced. */
void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    /* Drop the reference held on the previous silence chunk, if any */
    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (!silence) {
        pa_memchunk_reset(&bq->silence);
        return;
    }

    bq->silence = *silence;
    pa_memblock_ref(bq->silence.memblock);
}
925
/* Return whether the queue holds no blocks at all. Note this is about the
 * block list, not the read/write index distance. */
bool pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}
931
/* Drop every queued block, leaving the indexes untouched — the span between
 * read and write index thus becomes one big hole that reads back as
 * silence (hence the name). */
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}
940
/* Return the number of blocks currently linked into the queue. */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
946
/* Return the frame size all indexes and lengths are aligned to. */
size_t pa_memblockq_get_base(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->base;
}
952