/***
  This file is part of PulseAudio.

  Copyright 2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifndef LOG_TAG
#define LOG_TAG "Shm"
#endif

#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <signal.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

/* This is deprecated on glibc but is still used by FreeBSD */
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

#include <pulse/xmalloc.h>
#include <pulse/gccmacro.h>

#include <pulsecore/memfd-wrappers.h>
#include <pulsecore/core-error.h>
#include <pulsecore/log.h>
#include <pulsecore/random.h>
#include <pulsecore/core-util.h>
#include <pulsecore/macro.h>
#include <pulsecore/atomic.h>
#include <pulsecore/mem.h>

#include "shm.h"

#if defined(__linux__) && !defined(MADV_REMOVE)
#define MADV_REMOVE 9
#endif

/* 1 GiB at max */
#define MAX_SHM_SIZE (PA_ALIGN(1024*1024*1024))

#ifdef __linux__
/* On Linux we know that the shared memory blocks are files in
 * /dev/shm. We can use that information to list all blocks and
 * cleanup unused ones */
#define SHM_PATH "/dev/shm/"
#define SHM_ID_LEN 10
#elif defined(__sun)
#define SHM_PATH "/tmp"
#define SHM_ID_LEN 15
#else
#undef SHM_PATH
#undef SHM_ID_LEN
#endif
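
/* Note (editorial, derived from pa_shm_cleanup() below): SHM_ID_LEN is the
 * length of the file name prefix that precedes the numeric segment id,
 * i.e. strlen("pulse-shm-") == 10 on Linux and strlen(".SHMDpulse-shm-") == 15
 * on Solaris, so that de->d_name + SHM_ID_LEN points at the id. */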

#define SHM_MARKER ((int) 0xbeefcafe)

/* We now put this SHM marker at the end of each segment. It's
 * optional though, so that upgrading doesn't require a reboot. Note
 * that on multiarch systems 32-bit and 64-bit processes might access
 * this region simultaneously. The header fields therefore need to be
 * independent of the process' word width. */
struct shm_marker {
    pa_atomic_t marker; /* 0xbeefcafe */
    pa_atomic_t pid;
    uint64_t _reserved1;
    uint64_t _reserved2;
    uint64_t _reserved3;
    uint64_t _reserved4;
};

// Ensure struct is appropriately packed
static_assert(sizeof(struct shm_marker) == 8 * 5, "`struct shm_marker` is not tightly packed");
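
/* Layout sketch (illustrative, based on sharedmem_create() below): a POSIX
 * SHM segment requested with usable size S is created with a total size of
 * S + shm_marker_size(), and the marker is stored in the trailing
 * shm_marker_size() bytes:
 *
 *   [ 0 ........................ S-1 ][ struct shm_marker (aligned) ]
 *                                       ^ ptr + size - shm_marker_size(type)
 */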

static inline size_t shm_marker_size(pa_mem_type_t type) {
    if (type == PA_MEM_TYPE_SHARED_POSIX)
        return PA_ALIGN(sizeof(struct shm_marker));

    return 0;
}

#ifdef HAVE_SHM_OPEN
static char *segment_name(char *fn, size_t l, unsigned id) {
    pa_snprintf(fn, l, "/pulse-shm-%u", id);
    return fn;
}
#endif

static int privatemem_create(pa_shm *m, size_t size) {
    pa_assert(m);
    pa_assert(size > 0);

    m->type = PA_MEM_TYPE_PRIVATE;
    m->id = 0;
    m->size = size;
    m->do_unlink = false;
    m->fd = -1;

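    /* Allocation strategy (as the #if ladder below implements): prefer an
     * anonymous mmap(), fall back to posix_memalign() for page-aligned heap
     * memory, and finally to plain pa_xmalloc(). */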
#ifdef MAP_ANONYMOUS
    if ((m->ptr = mmap(NULL, m->size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, (off_t) 0)) == MAP_FAILED) {
        pa_log_error("mmap() failed: %s", pa_cstrerror(errno));
        return -1;
    }
#elif defined(HAVE_POSIX_MEMALIGN)
    {
        int r;

        /* posix_memalign() returns 0 on success and a positive error
         * number on failure, so check for any non-zero result and
         * report failure the same way as the other code paths. */
        if ((r = posix_memalign(&m->ptr, pa_page_size(), size)) != 0) {
            pa_log_error("posix_memalign() failed: %s", pa_cstrerror(r));
            return -1;
        }
    }
#else
    m->ptr = pa_xmalloc(m->size);
#endif

    return 0;
}

static int sharedmem_create(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
    char fn[32];
    int fd = -1;
    struct shm_marker *marker;
    bool do_unlink = false;

    /* Each time we create a new SHM area, let's first drop all stale
     * ones */
    pa_shm_cleanup();

    pa_random(&m->id, sizeof(m->id));

    switch (type) {
#ifdef HAVE_SHM_OPEN
    case PA_MEM_TYPE_SHARED_POSIX:
        segment_name(fn, sizeof(fn), m->id);
        fd = shm_open(fn, O_RDWR|O_CREAT|O_EXCL, mode);
        do_unlink = true;
        break;
#endif
#ifdef HAVE_MEMFD
    case PA_MEM_TYPE_SHARED_MEMFD:
        fd = memfd_create("pulseaudio", MFD_ALLOW_SEALING);
        break;
#endif
    default:
        goto fail;
    }

    if (fd < 0) {
        pa_log_error("%s open() failed: %s", pa_mem_type_to_string(type), pa_cstrerror(errno));
        goto fail;
    }

    m->type = type;
    m->size = size + shm_marker_size(type);
    m->do_unlink = do_unlink;

    if (ftruncate(fd, (off_t) m->size) < 0) {
        pa_log_error("ftruncate() failed: %s", pa_cstrerror(errno));
        goto fail;
    }

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

    if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(m->size), PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NORESERVE, fd, (off_t) 0)) == MAP_FAILED) {
        pa_log_error("mmap() failed: %s", pa_cstrerror(errno));
        goto fail;
    }

    if (type == PA_MEM_TYPE_SHARED_POSIX) {
        /* We store our PID at the end of the shm block, so that we
         * can check for dead shm segments later */
        marker = (struct shm_marker*) ((uint8_t*) m->ptr + m->size - shm_marker_size(type));
        pa_atomic_store(&marker->pid, (int) getpid());
        pa_atomic_store(&marker->marker, SHM_MARKER);
    }

    /* For memfds, we keep the fd open until we pass it
     * to the other PA endpoint over unix domain socket. */
    if (type != PA_MEM_TYPE_SHARED_MEMFD) {
        pa_assert_se(pa_close(fd) == 0);
        m->fd = -1;
    }
#ifdef HAVE_MEMFD
    else
        m->fd = fd;
#endif

    return 0;

fail:
    if (fd >= 0) {
#ifdef HAVE_SHM_OPEN
        if (type == PA_MEM_TYPE_SHARED_POSIX)
            shm_unlink(fn);
#endif
        pa_close(fd);
    }
#endif /* defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD) */

    return -1;
}

int pa_shm_create_rw(pa_shm *m, pa_mem_type_t type, size_t size, mode_t mode) {
    pa_assert(m);
    pa_assert(size > 0);
    pa_assert(size <= MAX_SHM_SIZE);
    pa_assert(!(mode & ~0777));
    pa_assert(mode >= 0600);

    /* Round up to make it page aligned */
    size = PA_PAGE_ALIGN(size);

    if (type == PA_MEM_TYPE_PRIVATE)
        return privatemem_create(m, size);

    return sharedmem_create(m, type, size, mode);
}
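
/*
 * Usage sketch (illustrative only, not taken from the callers in this tree):
 * create a read/write POSIX SHM segment of 64 KiB and release it again. Note
 * that m.size will also cover the trailing marker, so only the requested
 * number of bytes should be treated as usable payload.
 *
 *   pa_shm m;
 *
 *   if (pa_shm_create_rw(&m, PA_MEM_TYPE_SHARED_POSIX, 64*1024, 0700) == 0) {
 *       ... use the first 64*1024 bytes at m.ptr ...
 *       pa_shm_free(&m);
 *   }
 */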

static void privatemem_free(pa_shm *m) {
    pa_assert(m);
    pa_assert(m->ptr);
    pa_assert(m->size > 0);

#ifdef MAP_ANONYMOUS
    if (munmap(m->ptr, m->size) < 0)
        pa_log_error("munmap() failed: %s", pa_cstrerror(errno));
#elif defined(HAVE_POSIX_MEMALIGN)
    free(m->ptr);
#else
    pa_xfree(m->ptr);
#endif
}

void pa_shm_free(pa_shm *m) {
    pa_assert(m);
    pa_assert(m->ptr);
    pa_assert(m->size > 0);

#ifdef MAP_FAILED
    pa_assert(m->ptr != MAP_FAILED);
#endif

    if (m->type == PA_MEM_TYPE_PRIVATE) {
        privatemem_free(m);
        goto finish;
    }

    pa_log_info("mem type: %d", m->type);

#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
    if (munmap(m->ptr, PA_PAGE_ALIGN(m->size)) < 0)
        pa_log_error("munmap() failed: %s", pa_cstrerror(errno));

#ifdef HAVE_SHM_OPEN
    if (m->type == PA_MEM_TYPE_SHARED_POSIX && m->do_unlink) {
        char fn[32];

        segment_name(fn, sizeof(fn), m->id);
        if (shm_unlink(fn) < 0)
            pa_log_error("shm_unlink(%s) failed: %s", fn, pa_cstrerror(errno));
    }
#endif
#ifdef HAVE_MEMFD
    if (m->type == PA_MEM_TYPE_SHARED_MEMFD && m->fd != -1)
        pa_assert_se(pa_close(m->fd) == 0);
#endif

#else
    /* We shouldn't be here without shm or memfd support */
    pa_log_error("pa_shm_free() called, but neither POSIX SHM nor memfd support is compiled in");
#endif

finish:
    pa_zero(*m);
}

void pa_shm_punch(pa_shm *m, size_t offset, size_t size) {
    void *ptr;
    size_t o;
    const size_t page_size = pa_page_size();

    pa_assert(m);
    pa_assert(m->ptr);
    pa_assert(m->size > 0);
    pa_assert(offset+size <= m->size);

#ifdef MAP_FAILED
    pa_assert(m->ptr != MAP_FAILED);
#endif

    /* You're welcome to implement this as NOOP on systems that don't
     * support it */

    /* Align the pointer up to multiples of the page size */
    ptr = (uint8_t*) m->ptr + offset;
    o = (size_t) ((uint8_t*) ptr - (uint8_t*) PA_PAGE_ALIGN_PTR(ptr));

    if (o > 0) {
        size_t delta = page_size - o;
        ptr = (uint8_t*) ptr + delta;
        size -= delta;
    }

    /* Align the size down to multiples of page size */
    size = (size / page_size) * page_size;
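
    /* Worked example (assuming a 4096-byte page size): if ptr lands 100 bytes
     * past a page boundary, it is advanced by 4096 - 100 = 3996 bytes and the
     * size shrinks by the same amount; whatever remains is then rounded down
     * to a whole number of pages before the madvise() calls below. */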

#ifdef MADV_REMOVE
    if (madvise(ptr, size, MADV_REMOVE) >= 0)
        return;
#endif

#ifdef MADV_FREE
    if (madvise(ptr, size, MADV_FREE) >= 0)
        return;
#endif

#ifdef MADV_DONTNEED
    madvise(ptr, size, MADV_DONTNEED);
#elif defined(POSIX_MADV_DONTNEED)
    posix_madvise(ptr, size, POSIX_MADV_DONTNEED);
#endif
}

static int shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable, bool for_cleanup) {
#if defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD)
    char fn[32];
    int fd = -1;
    int prot;
    struct stat st;

    pa_assert(m);

    switch (type) {
#ifdef HAVE_SHM_OPEN
    case PA_MEM_TYPE_SHARED_POSIX:
        pa_assert(memfd_fd == -1);
        segment_name(fn, sizeof(fn), id);
        if ((fd = shm_open(fn, writable ? O_RDWR : O_RDONLY, 0)) < 0) {
            if ((errno != EACCES && errno != ENOENT) || !for_cleanup)
                pa_log_error("shm_open() failed: %s", pa_cstrerror(errno));
            goto fail;
        }
        break;
#endif
#ifdef HAVE_MEMFD
    case PA_MEM_TYPE_SHARED_MEMFD:
        pa_assert(memfd_fd != -1);
        fd = memfd_fd;
        break;
#endif
    default:
        goto fail;
    }

    if (fstat(fd, &st) < 0) {
        pa_log_error("fstat() failed: %s", pa_cstrerror(errno));
        goto fail;
    }

    if (st.st_size <= 0 ||
        st.st_size > (off_t) MAX_SHM_SIZE + (off_t) shm_marker_size(type) ||
        PA_ALIGN((size_t) st.st_size) != (size_t) st.st_size) {
        pa_log_error("Invalid shared memory segment size");
        goto fail;
    }

    prot = writable ? PROT_READ | PROT_WRITE : PROT_READ;
    if ((m->ptr = mmap(NULL, PA_PAGE_ALIGN(st.st_size), prot, MAP_SHARED, fd, (off_t) 0)) == MAP_FAILED) {
        pa_log_error("mmap() failed: %s", pa_cstrerror(errno));
        goto fail;
    }

    /* In case of attaching to memfd areas, _the caller_ maintains
     * ownership of the passed fd and has the sole responsibility
     * of closing it down. For other types, we're the code path
     * which created the fd in the first place and we're thus the
     * ones responsible for closing it down */
    if (type != PA_MEM_TYPE_SHARED_MEMFD)
        pa_assert_se(pa_close(fd) == 0);

    pa_log_info("shm_attach set mem type %d", type);
    m->type = type;
    m->id = id;
    m->size = (size_t) st.st_size;
    m->do_unlink = false;
    m->fd = -1;

    return 0;

fail:
    /* In case of memfds, caller maintains fd ownership */
    if (fd >= 0 && type != PA_MEM_TYPE_SHARED_MEMFD)
        pa_close(fd);

#endif /* defined(HAVE_SHM_OPEN) || defined(HAVE_MEMFD) */

    return -1;
}

/* Caller owns passed @memfd_fd and must close it down when appropriate. */
int pa_shm_attach(pa_shm *m, pa_mem_type_t type, unsigned id, int memfd_fd, bool writable) {
    return shm_attach(m, type, id, memfd_fd, writable, false);
}
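
/*
 * Usage sketch (illustrative only): attach read-only to an existing POSIX
 * SHM segment created by another process, given its numeric id (however that
 * id was communicated), and detach again once done.
 *
 *   pa_shm m;
 *
 *   if (pa_shm_attach(&m, PA_MEM_TYPE_SHARED_POSIX, id, -1, false) == 0) {
 *       ... read up to m.size bytes from m.ptr ...
 *       pa_shm_free(&m);
 *   }
 */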

int pa_shm_cleanup(void) {
    pa_log_info("start pa_shm_cleanup");
#ifdef HAVE_SHM_OPEN
#ifdef SHM_PATH
    DIR *d;
    struct dirent *de;

    if (!(d = opendir(SHM_PATH))) {
        pa_log_warn("Failed to read "SHM_PATH": %s", pa_cstrerror(errno));
        return -1;
    }

    while ((de = readdir(d))) {
        pa_shm seg;
        unsigned id;
        pid_t pid;
        char fn[128];
        struct shm_marker *m;

#if defined(__sun)
        if (strncmp(de->d_name, ".SHMDpulse-shm-", SHM_ID_LEN))
#else
        if (strncmp(de->d_name, "pulse-shm-", SHM_ID_LEN))
#endif
            continue;

        if (pa_atou(de->d_name + SHM_ID_LEN, &id) < 0)
            continue;

        if (shm_attach(&seg, PA_MEM_TYPE_SHARED_POSIX, id, -1, false, true) < 0)
            continue;

        if (seg.size < shm_marker_size(seg.type)) {
            pa_shm_free(&seg);
            continue;
        }

        m = (struct shm_marker*) ((uint8_t*) seg.ptr + seg.size - shm_marker_size(seg.type));

        if (pa_atomic_load(&m->marker) != SHM_MARKER) {
            pa_shm_free(&seg);
            continue;
        }

        if (!(pid = (pid_t) pa_atomic_load(&m->pid))) {
            pa_shm_free(&seg);
            continue;
        }

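        /* Probe the recorded owner without actually sending a signal:
         * kill(pid, 0) succeeds (or fails with something other than ESRCH)
         * while the process still exists, in which case the segment is
         * considered live and left alone. */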
        if (kill(pid, 0) == 0 || errno != ESRCH) {
            pa_shm_free(&seg);
            continue;
        }

        pa_shm_free(&seg);

        /* Ok, the owner of this shm segment is dead, so, let's remove the segment */
        segment_name(fn, sizeof(fn), id);

        if (shm_unlink(fn) < 0 && errno != EACCES && errno != ENOENT)
            pa_log_warn("Failed to remove SHM segment %s: %s", fn, pa_cstrerror(errno));
    }

    closedir(d);
#endif /* SHM_PATH */
#endif /* HAVE_SHM_OPEN */

    return 0;
}