1/***
2  This file is part of PulseAudio.
3
4  Copyright 2004-2006 Lennart Poettering
5  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7  PulseAudio is free software; you can redistribute it and/or modify
8  it under the terms of the GNU Lesser General Public License as
9  published by the Free Software Foundation; either version 2.1 of the
10  License, or (at your option) any later version.
11
12  PulseAudio is distributed in the hope that it will be useful, but
13  WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  Lesser General Public License for more details
16
17  You should have received a copy of the GNU Lesser General Public
18  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19***/
20
21#ifdef HAVE_CONFIG_H
22#include <config.h>
23#endif
24
25#ifndef LOG_TAG
26#define LOG_TAG "Memblock"
27#endif
28
29#include <stdio.h>
30#include <stdlib.h>
31#include <string.h>
32#include <unistd.h>
33#include <signal.h>
34#include <errno.h>
35
36#ifdef HAVE_VALGRIND_MEMCHECK_H
37#include <valgrind/memcheck.h>
38#endif
39
40#include <pulse/xmalloc.h>
41#include <pulse/def.h>
42
43#include <pulsecore/log.h>
44#include <pulsecore/hashmap.h>
45#include <pulsecore/semaphore.h>
46#include <pulsecore/mutex.h>
47#include <pulsecore/macro.h>
48#include <pulsecore/llist.h>
49#include <pulsecore/core-util.h>
50
51#include "memblock.h"
52
53/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
54 * note that the footprint is usually much smaller, since the data is
55 * stored in SHM and our OS does not commit the memory before we use
56 * it for the first time. */
57#define PA_MEMPOOL_SLOTS_MAX 1024
58#define PA_MEMPOOL_SLOT_SIZE (64*1024)
59
60#define PA_MEMEXPORT_SLOTS_MAX 128
61
62#define PA_MEMIMPORT_SLOTS_MAX 160
63#define PA_MEMIMPORT_SEGMENTS_MAX 16
64/*
65 * If true, this segment's lifetime will not be limited by the
66 * number of active blocks (seg->n_blocks) using its shared memory.
67 * Rather, it will exist for the full lifetime of the memimport it
68 * is attached to.
69 *
70 * This is done to support memfd blocks transport.
71 *
72 * To transfer memfd-backed blocks without passing their fd every
73 * time, thus minimizing overhead and avoiding fd leaks, a command
 * is sent with the memfd fd as ancillary data very early on.
75 *
76 * This command has an ID that identifies the memfd region. Further
77 * block references are then exclusively done using this ID. On the
78 * receiving end, such logic is enabled by the memimport's segment
79 * hash and 'permanent' segments below.
80 */
81static bool segment_is_permanent(pa_memimport_segment *seg) {
82    pa_assert(seg);
83    return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
84}
85
86/* A collection of multiple segments */
87struct pa_memimport {
88    pa_mutex *mutex;
89
90    pa_mempool *pool;
91    pa_hashmap *segments;
92    pa_hashmap *blocks;
93
94    /* Called whenever an imported memory block is no longer
95     * needed. */
96    pa_memimport_release_cb_t release_cb;
97    void *userdata;
98
99    PA_LLIST_FIELDS(pa_memimport);
100};
101
102struct memexport_slot {
103    PA_LLIST_FIELDS(struct memexport_slot);
104    pa_memblock *block;
105};
106
107struct pa_memexport {
108    pa_mutex *mutex;
109    pa_mempool *pool;
110
111    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
112
113    PA_LLIST_HEAD(struct memexport_slot, free_slots);
114    PA_LLIST_HEAD(struct memexport_slot, used_slots);
115    unsigned n_init;
116    unsigned baseidx;
117
    /* Called whenever a client dies from which we imported a memory
       block that we in turn exported to another client, so that we can
       revoke that memory block accordingly */
121    pa_memexport_revoke_cb_t revoke_cb;
122    void *userdata;
123
124    PA_LLIST_FIELDS(pa_memexport);
125};
126
127struct pa_mempool {
128    /* Reference count the mempool
129     *
130     * Any block allocation from the pool itself, or even just imported from
131     * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
132     * shall increase the refcount.
133     *
134     * This is done for per-client mempools: global references to blocks in
135     * the pool, or just to attached ones, can still be lingering around when
136     * the client connection dies and all per-client objects are to be freed.
137     * That is, current PulseAudio design does not guarantee that the client
138     * mempool blocks are referenced only by client-specific objects.
139     *
140     * For further details, please check:
141     * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
142     */
143    PA_REFCNT_DECLARE;
144
145    pa_semaphore *semaphore;
146    pa_mutex *mutex;
147
148    pa_shm memory;
149
150    bool global;
151
152    size_t block_size;
153    unsigned n_blocks;
154    bool is_remote_writable;
155
156    pa_atomic_t n_init;
157
158    PA_LLIST_HEAD(pa_memimport, imports);
159    PA_LLIST_HEAD(pa_memexport, exports);
160
161    /* A list of free slots that may be reused */
162    pa_flist *free_slots;
163
164    pa_mempool_stat stat;
165};
166
167static void segment_detach(pa_memimport_segment *seg);
168
169PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
170
171/* No lock necessary */
172static void stat_add(pa_memblock*b) {
173    pa_assert(b);
174    pa_assert(b->pool);
175
176    pa_atomic_inc(&b->pool->stat.n_allocated);
177    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
178
179    pa_atomic_inc(&b->pool->stat.n_accumulated);
180    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
181
182    if (b->type == PA_MEMBLOCK_IMPORTED) {
183        pa_atomic_inc(&b->pool->stat.n_imported);
184        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
185    }
186
187    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
188    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
189}
190
191/* No lock necessary */
192static void stat_remove(pa_memblock *b) {
193    pa_assert(b);
194    pa_assert(b->pool);
195
196    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
197    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
198
199    pa_atomic_dec(&b->pool->stat.n_allocated);
200    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
201
202    if (b->type == PA_MEMBLOCK_IMPORTED) {
203        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
204        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
205
206        pa_atomic_dec(&b->pool->stat.n_imported);
207        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
208    }
209
210    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
211}
212
213static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
214
215/* No lock necessary */
216pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
217    pa_memblock *b;
218
219    pa_assert(p);
220    pa_assert(length);
221
222    if (!(b = pa_memblock_new_pool(p, length)))
223        b = memblock_new_appended(p, length);
224
225    return b;
226}
227
228/* No lock necessary */
229static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
230    pa_memblock *b;
231
232    pa_assert(p);
233    pa_assert(length);
234
235    /* If -1 is passed as length we choose the size for the caller. */
236
237    if (length == (size_t) -1)
238        length = pa_mempool_block_size_max(p);
239
240    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
241    PA_REFCNT_INIT(b);
242    b->pool = p;
243    pa_mempool_ref(b->pool);
244    b->type = PA_MEMBLOCK_APPENDED;
245    b->read_only = b->is_silence = false;
246    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
247    b->length = length;
248    pa_atomic_store(&b->n_acquired, 0);
249    pa_atomic_store(&b->please_signal, 0);
250
251    stat_add(b);
252    return b;
253}
254
255/* No lock necessary */
256static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
257    struct mempool_slot *slot;
258    pa_assert(p);
259
260    if (!(slot = pa_flist_pop(p->free_slots))) {
261        int idx;
262
263        /* The free list was empty, we have to allocate a new entry */
264
265        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
266            pa_atomic_dec(&p->n_init);
267        else
268            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
269
270        if (!slot) {
271            if (pa_log_ratelimit(PA_LOG_DEBUG))
272                pa_log_debug("Pool full");
273            pa_atomic_inc(&p->stat.n_pool_full);
274            return NULL;
275        }
276    }
277
278/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
279/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
280/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
281/*     } */
282/* #endif */
283
284    return slot;
285}
286
287/* No lock necessary, totally redundant anyway */
288static inline void* mempool_slot_data(struct mempool_slot *slot) {
289    return slot;
290}
291
292/* No lock necessary */
293static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
294    pa_assert(p);
295
296    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
297    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
298
299    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
300}
301
302/* No lock necessary */
303static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
304    unsigned idx;
305
306    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
307        return NULL;
308
309    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
310}
311
312/* No lock necessary */
313bool pa_mempool_is_remote_writable(pa_mempool *p) {
314    pa_assert(p);
315    return p->is_remote_writable;
316}
317
318/* No lock necessary */
319void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable) {
320    pa_assert(p);
321    pa_assert(!writable || pa_mempool_is_shared(p));
322    p->is_remote_writable = writable;
323}
324
325/* No lock necessary */
326pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
327    pa_memblock *b = NULL;
328    struct mempool_slot *slot;
329    static int mempool_disable = 0;
330
331    pa_assert(p);
332    pa_assert(length);
333
    /* One-shot, environment-controlled escape hatch: if PULSE_MEMPOOL_DISABLE
     * is set, this function always returns NULL, so pa_memblock_new() falls
     * back to heap-backed (appended) blocks. */
    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
336
337    if (mempool_disable > 0)
338        return NULL;
339
340    /* If -1 is passed as length we choose the size for the caller: we
341     * take the largest size that fits in one of our slots. */
342
343    if (length == (size_t) -1)
344        length = pa_mempool_block_size_max(p);
345
346    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
347
348        if (!(slot = mempool_allocate_slot(p)))
349            return NULL;
350
351        b = mempool_slot_data(slot);
352        b->type = PA_MEMBLOCK_POOL;
353        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
354
355    } else if (p->block_size >= length) {
356
357        if (!(slot = mempool_allocate_slot(p)))
358            return NULL;
359
360        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
361            b = pa_xnew(pa_memblock, 1);
362
363        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
364        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
365
366    } else {
367        pa_log_debug("Memory block too large for pool: %lu > %lu",
368            (unsigned long) length, (unsigned long) p->block_size);
369        pa_atomic_inc(&p->stat.n_too_large_for_pool);
370        return NULL;
371    }
372
373    PA_REFCNT_INIT(b);
374    b->pool = p;
375    pa_mempool_ref(b->pool);
376    b->read_only = b->is_silence = false;
377    b->length = length;
378    pa_atomic_store(&b->n_acquired, 0);
379    pa_atomic_store(&b->please_signal, 0);
380
381    stat_add(b);
382    return b;
383}
384
385/* No lock necessary */
386pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
387    pa_memblock *b;
388
389    pa_assert(p);
390    pa_assert(d);
391    pa_assert(length != (size_t) -1);
392    pa_assert(length);
393
394    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
395        b = pa_xnew(pa_memblock, 1);
396
397    PA_REFCNT_INIT(b);
398    b->pool = p;
399    pa_mempool_ref(b->pool);
400    b->type = PA_MEMBLOCK_FIXED;
401    b->read_only = read_only;
402    b->is_silence = false;
403    pa_atomic_ptr_store(&b->data, d);
404    b->length = length;
405    pa_atomic_store(&b->n_acquired, 0);
406    pa_atomic_store(&b->please_signal, 0);
407
408    stat_add(b);
409    return b;
410}
411
412/* No lock necessary */
413pa_memblock *pa_memblock_new_user(
414        pa_mempool *p,
415        void *d,
416        size_t length,
417        pa_free_cb_t free_cb,
418        void *free_cb_data,
419        bool read_only) {
420    pa_memblock *b;
421
422    pa_assert(p);
423    pa_assert(d);
424    pa_assert(length);
425    pa_assert(length != (size_t) -1);
426    pa_assert(free_cb);
427
428    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
429        b = pa_xnew(pa_memblock, 1);
430
431    PA_REFCNT_INIT(b);
432    b->pool = p;
433    pa_mempool_ref(b->pool);
434    b->type = PA_MEMBLOCK_USER;
435    b->read_only = read_only;
436    b->is_silence = false;
437    pa_atomic_ptr_store(&b->data, d);
438    b->length = length;
439    pa_atomic_store(&b->n_acquired, 0);
440    pa_atomic_store(&b->please_signal, 0);
441
442    b->per_type.user.free_cb = free_cb;
443    b->per_type.user.free_cb_data = free_cb_data;
444
445    stat_add(b);
446    return b;
447}
448
449/* No lock necessary */
450bool pa_memblock_is_ours(pa_memblock *b) {
451    pa_assert(b);
452    pa_assert(PA_REFCNT_VALUE(b) > 0);
453
454    return b->type != PA_MEMBLOCK_IMPORTED;
455}
456
457/* No lock necessary */
458bool pa_memblock_is_read_only(pa_memblock *b) {
459    pa_assert(b);
460    pa_assert(PA_REFCNT_VALUE(b) > 0);
461
462    return b->read_only || PA_REFCNT_VALUE(b) > 1;
463}
464
465/* No lock necessary */
466bool pa_memblock_is_silence(pa_memblock *b) {
467    pa_assert(b);
468    pa_assert(PA_REFCNT_VALUE(b) > 0);
469
470    return b->is_silence;
471}
472
473/* No lock necessary */
474void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
475    pa_assert(b);
476    pa_assert(PA_REFCNT_VALUE(b) > 0);
477
478    b->is_silence = v;
479}
480
481/* No lock necessary */
482bool pa_memblock_ref_is_one(pa_memblock *b) {
483    int r;
484    pa_assert(b);
485
486    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
487
488    return r == 1;
489}
490
491/* No lock necessary */
492void* pa_memblock_acquire(pa_memblock *b) {
493    pa_assert(b);
494    pa_assert(PA_REFCNT_VALUE(b) > 0);
495
496    pa_atomic_inc(&b->n_acquired);
497
498    return pa_atomic_ptr_load(&b->data);
499}
500
501/* No lock necessary */
502void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
503    pa_assert(c);
504
505    return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
506}
507
/* No lock necessary; in corner cases it locks on its own */
509void pa_memblock_release(pa_memblock *b) {
510    int r;
511    pa_assert(b);
512    pa_assert(PA_REFCNT_VALUE(b) > 0);
513
514    r = pa_atomic_dec(&b->n_acquired);
515    pa_assert(r >= 1);
516
517    /* Signal a waiting thread that this memblock is no longer used */
518    if (r == 1 && pa_atomic_load(&b->please_signal))
519        pa_semaphore_post(b->pool->semaphore);
520}
521
522size_t pa_memblock_get_length(pa_memblock *b) {
523    pa_assert(b);
524    pa_assert(PA_REFCNT_VALUE(b) > 0);
525
526    return b->length;
527}
528
529/* Note! Always unref the returned pool after use */
530pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
531    pa_assert(b);
532    pa_assert(PA_REFCNT_VALUE(b) > 0);
533    pa_assert(b->pool);
534
535    pa_mempool_ref(b->pool);
536    return b->pool;
537}
538
539/* No lock necessary */
540pa_memblock* pa_memblock_ref(pa_memblock*b) {
541    pa_assert(b);
542    pa_assert(PA_REFCNT_VALUE(b) > 0);
543
544    PA_REFCNT_INC(b);
545    return b;
546}
547
548static void memblock_free(pa_memblock *b) {
549    pa_mempool *pool;
550
551    pa_assert(b);
552    pa_assert(b->pool);
553    pa_assert(pa_atomic_load(&b->n_acquired) == 0);
554
555    pool = b->pool;
556    stat_remove(b);
557
558    switch (b->type) {
559        case PA_MEMBLOCK_USER :
560            pa_assert(b->per_type.user.free_cb);
561            b->per_type.user.free_cb(b->per_type.user.free_cb_data);
562
563            /* Fall through */
564
565        case PA_MEMBLOCK_FIXED:
566            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
567                pa_xfree(b);
568
569            break;
570
571        case PA_MEMBLOCK_APPENDED:
572
573            /* We could attach it to unused_memblocks, but that would
574             * probably waste some considerable amount of memory */
575            pa_xfree(b);
576            break;
577
578        case PA_MEMBLOCK_IMPORTED: {
579            pa_memimport_segment *segment;
580            pa_memimport *import;
581
582            /* FIXME! This should be implemented lock-free */
583
584            pa_assert_se(segment = b->per_type.imported.segment);
585            pa_assert_se(import = segment->import);
586
587            pa_mutex_lock(import->mutex);
588
589            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
590
591            pa_assert(segment->n_blocks >= 1);
592            if (-- segment->n_blocks <= 0)
593                segment_detach(segment);
594
595            pa_mutex_unlock(import->mutex);
596
597            import->release_cb(import, b->per_type.imported.id, import->userdata);
598
599            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
600                pa_xfree(b);
601
602            break;
603        }
604
605        case PA_MEMBLOCK_POOL_EXTERNAL:
606        case PA_MEMBLOCK_POOL: {
607            struct mempool_slot *slot;
608            bool call_free;
609
610            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
611
612            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
613
614/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
615/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
616/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
617/*             } */
618/* #endif */
619
620            /* The free list dimensions should easily allow all slots
621             * to fit in, hence try harder if pushing this slot into
622             * the free list fails */
623            while (pa_flist_push(b->pool->free_slots, slot) < 0)
624                ;
625
626            if (call_free)
627                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
628                    pa_xfree(b);
629
630            break;
631        }
632
633        case PA_MEMBLOCK_TYPE_MAX:
634        default:
635            pa_assert_not_reached();
636    }
637
638    pa_mempool_unref(pool);
639}
640
641/* No lock necessary */
642void pa_memblock_unref(pa_memblock*b) {
643    pa_assert(b);
644    pa_assert(PA_REFCNT_VALUE(b) > 0);
645
646    if (PA_REFCNT_DEC(b) > 0)
647        return;
648
649    memblock_free(b);
650}
651
652/* Self locked */
653static void memblock_wait(pa_memblock *b) {
654    pa_assert(b);
655
656    if (pa_atomic_load(&b->n_acquired) > 0) {
657        /* We need to wait until all threads gave up access to the
658         * memory block before we can go on. Unfortunately this means
659         * that we have to lock and wait here. Sniff! */
660
661        pa_atomic_inc(&b->please_signal);
662
663        while (pa_atomic_load(&b->n_acquired) > 0)
664            pa_semaphore_wait(b->pool->semaphore);
665
666        pa_atomic_dec(&b->please_signal);
667    }
668}
669
670/* No lock necessary. This function is not multiple caller safe! */
671static void memblock_make_local(pa_memblock *b) {
672    pa_assert(b);
673
674    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
675
676    if (b->length <= b->pool->block_size) {
677        struct mempool_slot *slot;
678
679        if ((slot = mempool_allocate_slot(b->pool))) {
680            void *new_data;
681            /* We can move it into a local pool, perfect! */
682
683            new_data = mempool_slot_data(slot);
684            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
685            pa_atomic_ptr_store(&b->data, new_data);
686
687            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
688            b->read_only = false;
689
690            goto finish;
691        }
692    }
693
    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
695    b->per_type.user.free_cb = pa_xfree;
696    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
697    b->per_type.user.free_cb_data = pa_atomic_ptr_load(&b->data);
698
699    b->type = PA_MEMBLOCK_USER;
700    b->read_only = false;
701
702finish:
703    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
704    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
705    memblock_wait(b);
706}
707
708/* No lock necessary. This function is not multiple caller safe */
709void pa_memblock_unref_fixed(pa_memblock *b) {
710    pa_assert(b);
711    pa_assert(PA_REFCNT_VALUE(b) > 0);
712    pa_assert(b->type == PA_MEMBLOCK_FIXED);
713
714    if (PA_REFCNT_VALUE(b) > 1)
715        memblock_make_local(b);
716
717    pa_memblock_unref(b);
718}
719
720/* No lock necessary. */
721pa_memblock *pa_memblock_will_need(pa_memblock *b) {
722    void *p;
723
724    pa_assert(b);
725    pa_assert(PA_REFCNT_VALUE(b) > 0);
726
727    p = pa_memblock_acquire(b);
728    pa_will_need(p, b->length);
729    pa_memblock_release(b);
730
731    return b;
732}
733
734/* Self-locked. This function is not multiple-caller safe */
735static void memblock_replace_import(pa_memblock *b) {
736    pa_memimport_segment *segment;
737    pa_memimport *import;
738
739    pa_assert(b);
740    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
741
742    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
743    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
744    pa_atomic_dec(&b->pool->stat.n_imported);
745    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
746
747    pa_assert_se(segment = b->per_type.imported.segment);
748    pa_assert_se(import = segment->import);
749
750    pa_mutex_lock(import->mutex);
751
752    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
753
754    memblock_make_local(b);
755
756    pa_assert(segment->n_blocks >= 1);
757    if (-- segment->n_blocks <= 0)
758        segment_detach(segment);
759
760    pa_mutex_unlock(import->mutex);
761}
762
/* @per_client: This is a security measure. By default this should
764 * be set to true where the created mempool is never shared with more
765 * than one client in the system. Set this to false if a global
766 * mempool, shared with all existing and future clients, is required.
767 *
768 * NOTE-1: Do not create any further global mempools! They allow data
769 * leaks between clients and thus conflict with the xdg-app containers
770 * model. They also complicate the handling of memfd-based pools.
771 *
772 * NOTE-2: Almost all mempools are now created on a per client basis.
773 * The only exception is the pa_core's mempool which is still shared
774 * between all clients of the system.
775 *
 * Besides security issues, special marking for global mempools is
777 * required for memfd communication. To avoid fd leaks, memfd pools
778 * are registered with the connection pstream to create an ID<->memfd
779 * mapping on both PA endpoints. Such memory regions are then always
780 * referenced by their IDs and never by their fds and thus their fds
781 * can be quickly closed later.
782 *
 * Unfortunately this scheme cannot work with global pools, since the
 * ID registration mechanism needs to happen for each newly connected
 * client, which calls for special handling: the pool's fd has to stay
 * open the whole time :-(
787 *
788 * TODO-1: Transform the global core mempool to a per-client one
789 * TODO-2: Remove global mempools support */
pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
    const size_t page_size = pa_page_size();

    pa_log_debug("pa_mempool_new: type %d, size %zu, per_client %d", type, size, per_client);

    p = pa_xnew0(pa_mempool, 1);
797    PA_REFCNT_INIT(p);
798
799    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
800    if (p->block_size < page_size)
801        p->block_size = page_size;
802
803    if (size <= 0)
804        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
805    else {
806        p->n_blocks = (unsigned) (size / p->block_size);
807
808        if (p->n_blocks < 2)
809            p->n_blocks = 2;
810    }
811
812    if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
813        pa_xfree(p);
814        return NULL;
815    }
816
817    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is"
818                 "%s, maximum usable slot size is %lu",
819                 pa_mem_type_to_string(type),
820                 p->n_blocks,
821                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
822                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
823                 (unsigned long) pa_mempool_block_size_max(p));
824
825    p->global = !per_client;
826
827    pa_atomic_store(&p->n_init, 0);
828
829    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
830    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
831
832    p->mutex = pa_mutex_new(true, true);
833    p->semaphore = pa_semaphore_new(0);
834
835    p->free_slots = pa_flist_new(p->n_blocks);
836
837    return p;
838}
839
840static void mempool_free(pa_mempool *p) {
841    pa_assert(p);
842
843    pa_mutex_lock(p->mutex);
844
845    while (p->imports)
846        pa_memimport_free(p->imports);
847
848    while (p->exports)
849        pa_memexport_free(p->exports);
850
851    pa_mutex_unlock(p->mutex);
852
853    pa_flist_free(p->free_slots, NULL);
854
855    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
856
857        /* Ouch, somebody is retaining a memory block reference! */
858
859#ifdef DEBUG_REF
860        unsigned i;
861        pa_flist *list;
862
863        /* Let's try to find at least one of those leaked memory blocks */
864
865        list = pa_flist_new(p->n_blocks);
866
867        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
868            struct mempool_slot *slot;
869            pa_memblock *b, *k;
870
871            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
872            b = mempool_slot_data(slot);
873
874            while ((k = pa_flist_pop(p->free_slots))) {
875                while (pa_flist_push(list, k) < 0)
876                    ;
877
878                if (b == k)
879                    break;
880            }
881
882            if (!k)
883                pa_log_error("REF: Leaked memory block %p", b);
884
885            while ((k = pa_flist_pop(list)))
886                while (pa_flist_push(p->free_slots, k) < 0)
887                    ;
888        }
889
890        pa_flist_free(list, NULL);
891
892#endif
893
894        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.",
895            pa_atomic_load(&p->stat.n_allocated));
896
897/*         PA_DEBUG_TRAP; */
898    }
899
900    pa_shm_free(&p->memory);
901
902    pa_mutex_free(p->mutex);
903    pa_semaphore_free(p->semaphore);
904
905    pa_xfree(p);
906}
907
908/* No lock necessary */
909const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
910    pa_assert(p);
911
912    return &p->stat;
913}
914
915/* No lock necessary */
916size_t pa_mempool_block_size_max(pa_mempool *p) {
917    pa_assert(p);
918
919    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
920}
921
922/* No lock necessary */
923void pa_mempool_vacuum(pa_mempool *p) {
924    struct mempool_slot *slot;
925    pa_flist *list;
926
927    pa_assert(p);
928
929    list = pa_flist_new(p->n_blocks);
930
931    while ((slot = pa_flist_pop(p->free_slots)))
932        while (pa_flist_push(list, slot) < 0)
933            ;
934
935    while ((slot = pa_flist_pop(list))) {
936        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
937
938        while (pa_flist_push(p->free_slots, slot))
939            ;
940    }
941
942    pa_flist_free(list, NULL);
943}
944
945/* No lock necessary */
946bool pa_mempool_is_shared(pa_mempool *p) {
947    pa_assert(p);
948
949    return pa_mem_type_is_shared(p->memory.type);
950}
951
952/* No lock necessary */
953bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
954    pa_assert(p);
955
956    return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
957}
958
959/* No lock necessary */
960int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
961    pa_assert(p);
962
963    if (!pa_mempool_is_shared(p))
964        return -1;
965
966    *id = p->memory.id;
967
968    return 0;
969}
970
971pa_mempool* pa_mempool_ref(pa_mempool *p) {
972    pa_assert(p);
973    pa_assert(PA_REFCNT_VALUE(p) > 0);
974
975    PA_REFCNT_INC(p);
976    return p;
977}
978
979void pa_mempool_unref(pa_mempool *p) {
980    pa_assert(p);
981    pa_assert(PA_REFCNT_VALUE(p) > 0);
982
983    if (PA_REFCNT_DEC(p) <= 0)
984        mempool_free(p);
985}
986
987/* No lock necessary
988 * Check pa_mempool_new() for per-client vs. global mempools */
989bool pa_mempool_is_global(pa_mempool *p) {
990    pa_assert(p);
991
992    return p->global;
993}
994
995/* No lock necessary
996 * Check pa_mempool_new() for per-client vs. global mempools */
997bool pa_mempool_is_per_client(pa_mempool *p) {
998    return !pa_mempool_is_global(p);
999}
1000
1001/* Self-locked
1002 *
1003 * This is only for per-client mempools!
1004 *
1005 * After this method's return, the caller owns the file descriptor
 * and is responsible for closing it at the appropriate time. This
 * should only be called once during a mempool's lifetime.
1008 *
1009 * Check pa_shm->fd and pa_mempool_new() for further context. */
1010int pa_mempool_take_memfd_fd(pa_mempool *p) {
1011    int memfd_fd;
1012
1013    pa_assert(p);
1014    pa_assert(pa_mempool_is_shared(p));
1015    pa_assert(pa_mempool_is_memfd_backed(p));
1016    pa_assert(pa_mempool_is_per_client(p));
1017
1018    pa_mutex_lock(p->mutex);
1019
1020    memfd_fd = p->memory.fd;
1021    p->memory.fd = -1;
1022
1023    pa_mutex_unlock(p->mutex);
1024
1025    pa_assert(memfd_fd != -1);
1026    return memfd_fd;
1027}
1028
1029/* No lock necessary
1030 *
1031 * This is only for global mempools!
1032 *
1033 * Global mempools have their memfd descriptor always open. DO NOT
 * close the returned descriptor on your own.
1035 *
1036 * Check pa_mempool_new() for further context. */
1037int pa_mempool_get_memfd_fd(pa_mempool *p) {
1038    int memfd_fd;
1039
1040    pa_assert(p);
1041    pa_assert(pa_mempool_is_shared(p));
1042    pa_assert(pa_mempool_is_memfd_backed(p));
1043    pa_assert(pa_mempool_is_global(p));
1044
1045    memfd_fd = p->memory.fd;
1046    pa_assert(memfd_fd != -1);
1047
1048    return memfd_fd;
1049}
1050
1051/* For receiving blocks from other nodes */
1052pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
1053    pa_memimport *i;
1054
1055    pa_assert(p);
1056    pa_assert(cb);
1057
1058    i = pa_xnew(pa_memimport, 1);
1059    i->mutex = pa_mutex_new(true, true);
1060    i->pool = p;
1061    pa_mempool_ref(i->pool);
1062    i->segments = pa_hashmap_new(NULL, NULL);
1063    i->blocks = pa_hashmap_new(NULL, NULL);
1064    i->release_cb = cb;
1065    i->userdata = userdata;
1066
1067    pa_mutex_lock(p->mutex);
1068    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
1069    pa_mutex_unlock(p->mutex);
1070
1071    return i;
1072}
1073
1074static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
1075
1076/* Should be called locked
1077 * Caller owns passed @memfd_fd and must close it down when appropriate. */
1078static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
1079                                            int memfd_fd, bool writable) {
1080    pa_memimport_segment* seg;
1081    pa_assert(pa_mem_type_is_shared(type));
1082
1083    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
1084        return NULL;
1085
1086    seg = pa_xnew0(pa_memimport_segment, 1);
1087
1088    if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
1089        pa_xfree(seg);
1090        return NULL;
1091    }
1092
1093    seg->writable = writable;
1094    seg->import = i;
1095    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
1096
1097    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
1098    return seg;
1099}
1100
1101/* Should be called locked */
1102static void segment_detach(pa_memimport_segment *seg) {
1103    pa_assert(seg);
1104    pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
1105
1106    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
1107    pa_shm_free(&seg->memory);
1108
1109    if (seg->trap)
1110        pa_memtrap_remove(seg->trap);
1111
1112    pa_xfree(seg);
1113}
1114
1115/* Self-locked. Not multiple-caller safe */
1116void pa_memimport_free(pa_memimport *i) {
1117    pa_memexport *e;
1118    pa_memblock *b;
1119    pa_memimport_segment *seg;
1120    void *state = NULL;
1121
1122    pa_assert(i);
1123
1124    pa_mutex_lock(i->mutex);
1125
1126    while ((b = pa_hashmap_first(i->blocks)))
1127        memblock_replace_import(b);
1128
1129    /* Permanent segments exist for the lifetime of the memimport. Now
1130     * that we're freeing the memimport itself, clear them all up.
1131     *
1132     * Careful! segment_detach() internally removes itself from the
1133     * memimport's hash; the same hash we're now using for iteration. */
1134    PA_HASHMAP_FOREACH(seg, i->segments, state) {
1135        if (segment_is_permanent(seg))
1136            segment_detach(seg);
1137    }
1138    pa_assert(pa_hashmap_size(i->segments) == 0);
1139
1140    pa_mutex_unlock(i->mutex);
1141
1142    pa_mutex_lock(i->pool->mutex);
1143
1144    /* If we've exported this block further we need to revoke that export */
1145    for (e = i->pool->exports; e; e = e->next)
1146        memexport_revoke_blocks(e, i);
1147
1148    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
1149
1150    pa_mutex_unlock(i->pool->mutex);
1151
1152    pa_mempool_unref(i->pool);
1153    pa_hashmap_free(i->blocks);
1154    pa_hashmap_free(i->segments);
1155
1156    pa_mutex_free(i->mutex);
1157
1158    pa_xfree(i);
1159}
1160
1161/* Create a new memimport's memfd segment entry, with passed SHM ID
1162 * as key and the newly-created segment (with its mmap()-ed memfd
1163 * memory region) as its value.
1164 *
1165 * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
1166 * and 'pa_pstream_register_memfd_mempool()' for further details.
1167 *
1168 * Caller owns passed @memfd_fd and must close it down when appropriate. */
1169int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
1170    pa_memimport_segment *seg;
1171    int ret = -1;
1172
1173    pa_assert(i);
1174    pa_assert(memfd_fd != -1);
1175
1176    pa_mutex_lock(i->mutex);
1177
1178    if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
1179        goto finish;
1180
1181    /* n_blocks acts as a segment reference count. To avoid the segment
1182     * being deleted when receiving silent memchunks, etc., mark our
1183     * permanent presence by incrementing that refcount. */
1184    seg->n_blocks++;
1185
1186    pa_assert(segment_is_permanent(seg));
1187    ret = 0;
1188
1189finish:
1190    pa_mutex_unlock(i->mutex);
1191    return ret;
1192}
1193
1194/* Self-locked */
1195pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
1196                              size_t offset, size_t size, bool writable) {
1197    pa_memblock *b = NULL;
1198    pa_memimport_segment *seg;
1199
1200    pa_assert(i);
1201    pa_assert(pa_mem_type_is_shared(type));
1202
1203    pa_mutex_lock(i->mutex);
1204
1205    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
1206        pa_memblock_ref(b);
1207        goto finish;
1208    }
1209
1210    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
1211        goto finish;
1212
1213    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
1214        if (type == PA_MEM_TYPE_SHARED_MEMFD) {
1215            pa_log_error("Bailing out! No cached memimport segment for memfd ID %u", shm_id);
1216            pa_log_error("Did the other PA endpoint forget registering its memfd pool?");
1217            goto finish;
1218        }
1219
1220        pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
1221        if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
1222            goto finish;
1223    }
1224
1225    if (writable && !seg->writable) {
1226        pa_log_error("Cannot import cached segment in write mode - previously mapped as read-only");
1227        goto finish;
1228    }
1229
1230    if (offset+size > seg->memory.size)
1231        goto finish;
1232
1233    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1234        b = pa_xnew(pa_memblock, 1);
1235
1236    PA_REFCNT_INIT(b);
1237    b->pool = i->pool;
1238    pa_mempool_ref(b->pool);
1239    b->type = PA_MEMBLOCK_IMPORTED;
1240    b->read_only = !writable;
1241    b->is_silence = false;
1242    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1243    b->length = size;
1244    pa_atomic_store(&b->n_acquired, 0);
1245    pa_atomic_store(&b->please_signal, 0);
1246    b->per_type.imported.id = block_id;
1247    b->per_type.imported.segment = seg;
1248
1249    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1250
1251    seg->n_blocks++;
1252
1253    stat_add(b);
1254
1255finish:
1256    pa_mutex_unlock(i->mutex);
1257
1258    return b;
1259}
1260
1261int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1262    pa_memblock *b;
1263    int ret = 0;
1264    pa_assert(i);
1265
1266    pa_mutex_lock(i->mutex);
1267
1268    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1269        ret = -1;
1270        goto finish;
1271    }
1272
1273    memblock_replace_import(b);
1274
1275finish:
1276    pa_mutex_unlock(i->mutex);
1277
1278    return ret;
1279}
1280
1281/* For sending blocks to other nodes */
1282pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1283    pa_memexport *e;
1284
1285    static pa_atomic_t export_baseidx = PA_ATOMIC_INIT(0);
1286
1287    pa_assert(p);
1288    pa_assert(cb);
1289
1290    if (!pa_mempool_is_shared(p))
1291        return NULL;
1292
1293    e = pa_xnew(pa_memexport, 1);
1294    e->mutex = pa_mutex_new(true, true);
1295    e->pool = p;
1296    pa_mempool_ref(e->pool);
1297    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1298    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1299    e->n_init = 0;
1300    e->revoke_cb = cb;
1301    e->userdata = userdata;
1302
1303    pa_mutex_lock(p->mutex);
1304
1305    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1306    e->baseidx = (uint32_t) pa_atomic_add(&export_baseidx, PA_MEMEXPORT_SLOTS_MAX);
1307
1308    pa_mutex_unlock(p->mutex);
1309    return e;
1310}
1311
1312void pa_memexport_free(pa_memexport *e) {
1313    pa_assert(e);
1314
1315    pa_mutex_lock(e->mutex);
1316    while (e->used_slots)
1317        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots + e->baseidx));
1318    pa_mutex_unlock(e->mutex);
1319
1320    pa_mutex_lock(e->pool->mutex);
1321    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1322    pa_mutex_unlock(e->pool->mutex);
1323
1324    pa_mempool_unref(e->pool);
1325    pa_mutex_free(e->mutex);
1326    pa_xfree(e);
1327}
1328
1329/* Self-locked */
1330int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1331    pa_memblock *b;
1332
1333    pa_assert(e);
1334
1335    pa_mutex_lock(e->mutex);
1336
1337    if (id < e->baseidx)
1338        goto fail;
1339    id -= e->baseidx;
1340
1341    if (id >= e->n_init)
1342        goto fail;
1343
1344    if (!e->slots[id].block)
1345        goto fail;
1346
1347    b = e->slots[id].block;
1348    e->slots[id].block = NULL;
1349
1350    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1351    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1352
1353    pa_mutex_unlock(e->mutex);
1354
1355/*     pa_log("Processing release for %u", id); */
1356
1357    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1358    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1359
1360    pa_atomic_dec(&e->pool->stat.n_exported);
1361    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1362
1363    pa_memblock_unref(b);
1364
1365    return 0;
1366
1367fail:
1368    pa_mutex_unlock(e->mutex);
1369
1370    return -1;
1371}
1372
1373/* Self-locked */
1374static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1375    struct memexport_slot *slot, *next;
1376    pa_assert(e);
1377    pa_assert(i);
1378
1379    pa_mutex_lock(e->mutex);
1380
1381    for (slot = e->used_slots; slot; slot = next) {
1382        uint32_t idx;
1383        next = slot->next;
1384
1385        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1386            slot->block->per_type.imported.segment->import != i)
1387            continue;
1388
1389        idx = (uint32_t) (slot - e->slots + e->baseidx);
1390        e->revoke_cb(e, idx, e->userdata);
1391        pa_memexport_process_release(e, idx);
1392    }
1393
1394    pa_mutex_unlock(e->mutex);
1395}
1396
1397/* No lock necessary */
1398static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1399    pa_memblock *n;
1400
1401    pa_assert(p);
1402    pa_assert(b);
1403
1404    if (b->type == PA_MEMBLOCK_IMPORTED ||
1405        b->type == PA_MEMBLOCK_POOL ||
1406        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1407        pa_assert(b->pool == p);
1408        return pa_memblock_ref(b);
1409    }
1410
1411    if (!(n = pa_memblock_new_pool(p, b->length)))
1412        return NULL;
1413
1414    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1415    return n;
1416}
1417
1418/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
                     uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
1422    struct memexport_slot *slot;
1423    void *data;
1424
1425    pa_assert(e);
1426    pa_assert(b);
1427    pa_assert(type);
1428    pa_assert(block_id);
1429    pa_assert(shm_id);
1430    pa_assert(offset);
1431    pa_assert(size);
1432    pa_assert(b->pool == e->pool);
1433
1434    if (!(b = memblock_shared_copy(e->pool, b)))
1435        return -1;
1436
1437    pa_mutex_lock(e->mutex);
1438
1439    if (e->free_slots) {
1440        slot = e->free_slots;
1441        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1442    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1443        slot = &e->slots[e->n_init++];
1444    else {
1445        pa_mutex_unlock(e->mutex);
1446        pa_memblock_unref(b);
1447        return -1;
1448    }
1449
1450    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1451    slot->block = b;
1452    *block_id = (uint32_t) (slot - e->slots + e->baseidx);
1453
1454    pa_mutex_unlock(e->mutex);
1455/*     pa_log("Got block id %u", *block_id); */
1456
1457    data = pa_memblock_acquire(b);
1458
1459    if (b->type == PA_MEMBLOCK_IMPORTED) {
1460        pa_assert(b->per_type.imported.segment);
1461        memory = &b->per_type.imported.segment->memory;
1462    } else {
1463        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1464        pa_assert(b->pool);
1465        pa_assert(pa_mempool_is_shared(b->pool));
1466        memory = &b->pool->memory;
1467    }
1468
1469    pa_assert(data >= memory->ptr);
1470    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1471
1472    *type = memory->type;
1473    *shm_id = memory->id;
1474    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1475    *size = b->length;
1476
1477    pa_memblock_release(b);
1478
1479    pa_atomic_inc(&e->pool->stat.n_exported);
1480    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);
1481
1482    return 0;
1483}
1484