1 /***
2   This file is part of PulseAudio.
3 
4   Copyright 2004-2006 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6 
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11 
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16 
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
19 ***/
20 
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24 
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 
29 #include <pulse/introspect.h>
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37 
38 #include <pulsecore/i18n.h>
39 #include <pulsecore/sink-input.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/sample-util.h>
43 #include <pulsecore/stream-util.h>
44 #include <pulsecore/mix.h>
45 #include <pulsecore/core-subscribe.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/play-memblockq.h>
49 #include <pulsecore/flist.h>
50 
51 #include "sink.h"
52 
53 #define MAX_MIX_CHANNELS 32
54 #define MIX_BUFFER_LENGTH (pa_page_size())
55 #define ABSOLUTE_MIN_LATENCY (500)
56 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
57 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
58 
59 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
60 
61 struct pa_sink_volume_change {
62     pa_usec_t at;
63     pa_cvolume hw_volume;
64 
65     PA_LLIST_FIELDS(pa_sink_volume_change);
66 };
67 
68 struct set_state_data {
69     pa_sink_state_t state;
70     pa_suspend_cause_t suspend_cause;
71 };
72 
73 static void sink_free(pa_object *s);
74 
75 static void pa_sink_volume_change_push(pa_sink *s);
76 static void pa_sink_volume_change_flush(pa_sink *s);
77 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
78 
79 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
80     pa_assert(data);
81 
82     pa_zero(*data);
83     data->proplist = pa_proplist_new();
84     data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
85 
86     return data;
87 }
88 
89 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
90     pa_assert(data);
91 
92     pa_xfree(data->name);
93     data->name = pa_xstrdup(name);
94 }
95 
96 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
97     pa_assert(data);
98 
99     if ((data->sample_spec_is_set = !!spec))
100         data->sample_spec = *spec;
101 }
102 
103 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
104     pa_assert(data);
105 
106     if ((data->channel_map_is_set = !!map))
107         data->channel_map = *map;
108 }
109 
110 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
111     pa_assert(data);
112 
113     data->alternate_sample_rate_is_set = true;
114     data->alternate_sample_rate = alternate_sample_rate;
115 }
116 
117 void pa_sink_new_data_set_avoid_resampling(pa_sink_new_data *data, bool avoid_resampling) {
118     pa_assert(data);
119 
120     data->avoid_resampling_is_set = true;
121     data->avoid_resampling = avoid_resampling;
122 }
123 
124 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
125     pa_assert(data);
126 
127     if ((data->volume_is_set = !!volume))
128         data->volume = *volume;
129 }
130 
131 void pa_sink_new_data_set_muted(pa_sink_new_data *data, bool mute) {
132     pa_assert(data);
133 
134     data->muted_is_set = true;
135     data->muted = mute;
136 }
137 
138 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
139     pa_assert(data);
140 
141     pa_xfree(data->active_port);
142     data->active_port = pa_xstrdup(port);
143 }
144 
145 void pa_sink_new_data_done(pa_sink_new_data *data) {
146     pa_assert(data);
147 
148     pa_proplist_free(data->proplist);
149 
150     if (data->ports)
151         pa_hashmap_free(data->ports);
152 
153     pa_xfree(data->name);
154     pa_xfree(data->active_port);
155 }
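/*
 * Illustrative sketch (not part of the original file) of how a sink
 * implementation typically uses the pa_sink_new_data helpers above; the
 * variable names are hypothetical:
 *
 *     pa_sink_new_data data;
 *     pa_sink_new_data_init(&data);
 *     pa_sink_new_data_set_name(&data, "example_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *     sink = pa_sink_new(core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 */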
156 
157 /* Called from main context */
158 static void reset_callbacks(pa_sink *s) {
159     pa_assert(s);
160 
161     s->set_state_in_main_thread = NULL;
162     s->set_state_in_io_thread = NULL;
163     s->get_volume = NULL;
164     s->set_volume = NULL;
165     s->write_volume = NULL;
166     s->get_mute = NULL;
167     s->set_mute = NULL;
168     s->request_rewind = NULL;
169     s->update_requested_latency = NULL;
170     s->set_port = NULL;
171     s->get_formats = NULL;
172     s->set_formats = NULL;
173     s->reconfigure = NULL;
174 }
175 
176 /* Called from main context */
177 pa_sink* pa_sink_new(
178         pa_core *core,
179         pa_sink_new_data *data,
180         pa_sink_flags_t flags) {
181 
182     pa_sink *s;
183     const char *name;
184     char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
185     pa_source_new_data source_data;
186     const char *dn;
187     char *pt;
188 
189     pa_assert(core);
190     pa_assert(data);
191     pa_assert(data->name);
192     pa_assert_ctl_context();
193 
194     s = pa_msgobject_new(pa_sink);
195 
196     if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
197         pa_log_debug("Failed to register name %s.", data->name);
198         pa_xfree(s);
199         return NULL;
200     }
201 
202     pa_sink_new_data_set_name(data, name);
203 
204     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
205         pa_xfree(s);
206         pa_namereg_unregister(core, name);
207         return NULL;
208     }
209 
210     /* FIXME, need to free s here on failure */
211 
212     pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
213     pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
214 
215     pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
216 
217     if (!data->channel_map_is_set)
218         pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
219 
220     pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
221     pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
222 
223     /* FIXME: There should probably be a general function for checking whether
224      * the sink volume is allowed to be set, like there is for sink inputs. */
225     pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
226 
227     if (!data->volume_is_set) {
228         pa_cvolume_reset(&data->volume, data->sample_spec.channels);
229         data->save_volume = false;
230     }
231 
232     pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
233     pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
234 
235     if (!data->muted_is_set)
236         data->muted = false;
237 
238     if (data->card)
239         pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
240 
241     pa_device_init_description(data->proplist, data->card);
242     pa_device_init_icon(data->proplist, true);
243     pa_device_init_intended_roles(data->proplist);
244 
245     if (!data->active_port) {
246         pa_device_port *p = pa_device_port_find_best(data->ports);
247         if (p)
248             pa_sink_new_data_set_port(data, p->name);
249     }
250 
251     if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
252         pa_xfree(s);
253         pa_namereg_unregister(core, name);
254         return NULL;
255     }
256 
257     s->parent.parent.free = sink_free;
258     s->parent.process_msg = pa_sink_process_msg;
259 
260     s->core = core;
261     s->state = PA_SINK_INIT;
262     s->flags = flags;
263     s->priority = 0;
264     s->suspend_cause = data->suspend_cause;
265     s->name = pa_xstrdup(name);
266     s->proplist = pa_proplist_copy(data->proplist);
267     s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
268     s->module = data->module;
269     s->card = data->card;
270 
271     s->priority = pa_device_init_priority(s->proplist);
272 
273     s->sample_spec = data->sample_spec;
274     s->channel_map = data->channel_map;
275     s->default_sample_rate = s->sample_spec.rate;
276 
277     if (data->alternate_sample_rate_is_set)
278         s->alternate_sample_rate = data->alternate_sample_rate;
279     else
280         s->alternate_sample_rate = s->core->alternate_sample_rate;
281 
282     if (data->avoid_resampling_is_set)
283         s->avoid_resampling = data->avoid_resampling;
284     else
285         s->avoid_resampling = s->core->avoid_resampling;
286 
287     s->inputs = pa_idxset_new(NULL, NULL);
288     s->n_corked = 0;
289     s->input_to_master = NULL;
290 
291     s->reference_volume = s->real_volume = data->volume;
292     pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
293     s->base_volume = PA_VOLUME_NORM;
294     s->n_volume_steps = PA_VOLUME_NORM+1;
295     s->muted = data->muted;
296     s->refresh_volume = s->refresh_muted = false;
297 
298     reset_callbacks(s);
299     s->userdata = NULL;
300 
301     s->asyncmsgq = NULL;
302 
303     /* As a minor optimization we just steal the list instead of
304      * copying it here */
305     s->ports = data->ports;
306     data->ports = NULL;
307 
308     s->active_port = NULL;
309     s->save_port = false;
310 
311     if (data->active_port)
312         if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
313             s->save_port = data->save_port;
314 
315     /* Hopefully the active port has already been assigned in the previous call
316        to pa_device_port_find_best, but better safe than sorry */
317     if (!s->active_port)
318         s->active_port = pa_device_port_find_best(s->ports);
319 
320     if (s->active_port)
321         s->port_latency_offset = s->active_port->latency_offset;
322     else
323         s->port_latency_offset = 0;
324 
325     s->save_volume = data->save_volume;
326     s->save_muted = data->save_muted;
327 
328     pa_silence_memchunk_get(
329             &core->silence_cache,
330             core->mempool,
331             &s->silence,
332             &s->sample_spec,
333             0);
334 
335     s->thread_info.rtpoll = NULL;
336     s->thread_info.inputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
337                                                 (pa_free_cb_t) pa_sink_input_unref);
338     s->thread_info.soft_volume =  s->soft_volume;
339     s->thread_info.soft_muted = s->muted;
340     s->thread_info.state = s->state;
341     s->thread_info.rewind_nbytes = 0;
342     s->thread_info.last_rewind_nbytes = 0;
343     s->thread_info.rewind_requested = false;
344     s->thread_info.max_rewind = 0;
345     s->thread_info.max_request = 0;
346     s->thread_info.requested_latency_valid = false;
347     s->thread_info.requested_latency = 0;
348     s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
349     s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
350     s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
351 
352     PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
353     s->thread_info.volume_changes_tail = NULL;
354     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
355     s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
356     s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
357     s->thread_info.port_latency_offset = s->port_latency_offset;
358 
359     /* FIXME: This should probably be moved to pa_sink_put() */
360     pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
361 
362     if (s->card)
363         pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
364 
365     pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
366     pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
367                 s->index,
368                 s->name,
369                 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
370                 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
371                 pt);
372     pa_xfree(pt);
373 
374     pa_source_new_data_init(&source_data);
375     pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
376     pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
377     pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
378     pa_source_new_data_set_avoid_resampling(&source_data, s->avoid_resampling);
379     source_data.name = pa_sprintf_malloc("%s.monitor", name);
380     source_data.driver = data->driver;
381     source_data.module = data->module;
382     source_data.card = data->card;
383 
384     dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
385     pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
386     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
387 
388     s->monitor_source = pa_source_new(core, &source_data,
389                                       ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
390                                       ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
391 
392     pa_source_new_data_done(&source_data);
393 
394     if (!s->monitor_source) {
395         pa_sink_unlink(s);
396         pa_sink_unref(s);
397         return NULL;
398     }
399 
400     s->monitor_source->monitor_of = s;
401 
402     pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
403     pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
404     pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
405 
406     return s;
407 }
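/*
 * Note: pa_sink_new() alone does not make the sink usable. pa_sink_put()
 * below asserts that an asyncmsgq has been attached, and implementations
 * normally also attach an rtpoll before publishing the sink. A rough,
 * illustrative sequence (variable names are hypothetical):
 *
 *     pa_sink_set_asyncmsgq(s, thread_mq.inq);
 *     pa_sink_set_rtpoll(s, rtpoll);
 *     pa_sink_put(s);
 */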
408 
409 /* Called from main context */
410 static int sink_set_state(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
411     int ret = 0;
412     bool state_changed;
413     bool suspend_cause_changed;
414     bool suspending;
415     bool resuming;
416     pa_sink_state_t old_state;
417     pa_suspend_cause_t old_suspend_cause;
418 
419     pa_assert(s);
420     pa_assert_ctl_context();
421 
422     state_changed = state != s->state;
423     suspend_cause_changed = suspend_cause != s->suspend_cause;
424 
425     if (!state_changed && !suspend_cause_changed)
426         return 0;
427 
428     suspending = PA_SINK_IS_OPENED(s->state) && state == PA_SINK_SUSPENDED;
429     resuming = s->state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state);
430 
431     /* If we are resuming, suspend_cause must be 0. */
432     pa_assert(!resuming || !suspend_cause);
433 
434     /* Here's something to think about: what to do with the suspend cause if
435      * resuming the sink fails? The old suspend cause will be incorrect, so we
436      * can't use that. On the other hand, if we set no suspend cause (as is the
437      * case currently), then it looks strange to have a sink suspended without
438      * any cause. It might be a good idea to add a new "resume failed" suspend
439      * cause, or it might just add unnecessary complexity, given that the
440      * current approach of not setting any suspend cause works well enough. */
441 
442     if (s->set_state_in_main_thread) {
443         if ((ret = s->set_state_in_main_thread(s, state, suspend_cause)) < 0) {
444             /* set_state_in_main_thread() is allowed to fail only when resuming. */
445             pa_assert(resuming);
446 
447             /* If resuming fails, we set the state to SUSPENDED and
448              * suspend_cause to 0. */
449             state = PA_SINK_SUSPENDED;
450             suspend_cause = 0;
451             state_changed = false;
452             suspend_cause_changed = suspend_cause != s->suspend_cause;
453             resuming = false;
454 
455             /* We know the state isn't changing. If the suspend cause isn't
456              * changing either, then there's nothing more to do. */
457             if (!suspend_cause_changed)
458                 return ret;
459         }
460     }
461 
462     if (s->asyncmsgq) {
463         struct set_state_data data = { .state = state, .suspend_cause = suspend_cause };
464 
465         if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, &data, 0, NULL)) < 0) {
466             /* SET_STATE is allowed to fail only when resuming. */
467             pa_assert(resuming);
468 
469             if (s->set_state_in_main_thread)
470                 s->set_state_in_main_thread(s, PA_SINK_SUSPENDED, 0);
471 
472             /* If resuming fails, we set the state to SUSPENDED and
473              * suspend_cause to 0. */
474             state = PA_SINK_SUSPENDED;
475             suspend_cause = 0;
476             state_changed = false;
477             suspend_cause_changed = suspend_cause != s->suspend_cause;
478             resuming = false;
479 
480             /* We know the state isn't changing. If the suspend cause isn't
481              * changing either, then there's nothing more to do. */
482             if (!suspend_cause_changed)
483                 return ret;
484         }
485     }
486 
487     old_suspend_cause = s->suspend_cause;
488     if (suspend_cause_changed) {
489         char old_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
490         char new_cause_buf[PA_SUSPEND_CAUSE_TO_STRING_BUF_SIZE];
491 
492         pa_log_debug("%s: suspend_cause: %s -> %s", s->name, pa_suspend_cause_to_string(s->suspend_cause, old_cause_buf),
493                      pa_suspend_cause_to_string(suspend_cause, new_cause_buf));
494         s->suspend_cause = suspend_cause;
495     }
496 
497     old_state = s->state;
498     if (state_changed) {
499         pa_log_debug("%s: state: %s -> %s", s->name, pa_sink_state_to_string(s->state), pa_sink_state_to_string(state));
500         s->state = state;
501 
502         /* If we enter UNLINKED state, then we don't send change notifications.
503          * pa_sink_unlink() will send unlink notifications instead. */
504         if (state != PA_SINK_UNLINKED) {
505             pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
506             pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507         }
508     }
509 
510     if (suspending || resuming || suspend_cause_changed) {
511         pa_sink_input *i;
512         uint32_t idx;
513 
514         /* We're suspending or resuming, tell everyone about it */
515 
516         PA_IDXSET_FOREACH(i, s->inputs, idx)
517             if (s->state == PA_SINK_SUSPENDED &&
518                 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
519                 pa_sink_input_kill(i);
520             else if (i->suspend)
521                 i->suspend(i, old_state, old_suspend_cause);
522     }
523 
524     if ((suspending || resuming || suspend_cause_changed) && s->monitor_source && state != PA_SINK_UNLINKED)
525         pa_source_sync_suspend(s->monitor_source);
526 
527     return ret;
528 }
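/*
 * For orientation, the order of operations in sink_set_state() above is:
 * first the main-thread callback, then the SET_STATE message to the IO
 * thread, then suspend_cause/state bookkeeping plus hook and subscription
 * notifications, then the per-input suspend callbacks (or killing inputs
 * flagged PA_SINK_INPUT_KILL_ON_SUSPEND), and finally syncing the monitor
 * source's suspend state.
 */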
529 
530 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
531     pa_assert(s);
532 
533     s->get_volume = cb;
534 }
535 
536 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
537     pa_sink_flags_t flags;
538 
539     pa_assert(s);
540     pa_assert(!s->write_volume || cb);
541 
542     s->set_volume = cb;
543 
544     /* Save the current flags so we can tell if they've changed */
545     flags = s->flags;
546 
547     if (cb) {
548         /* The sink implementor is responsible for setting decibel volume support */
549         s->flags |= PA_SINK_HW_VOLUME_CTRL;
550     } else {
551         s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
552         /* See note below in pa_sink_put() about volume sharing and decibel volumes */
553         pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
554     }
555 
556     /* If the flags have changed after init, let any clients know via a change event */
557     if (s->state != PA_SINK_INIT && flags != s->flags)
558         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
559 }
560 
561 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
562     pa_sink_flags_t flags;
563 
564     pa_assert(s);
565     pa_assert(!cb || s->set_volume);
566 
567     s->write_volume = cb;
568 
569     /* Save the current flags so we can tell if they've changed */
570     flags = s->flags;
571 
572     if (cb)
573         s->flags |= PA_SINK_DEFERRED_VOLUME;
574     else
575         s->flags &= ~PA_SINK_DEFERRED_VOLUME;
576 
577     /* If the flags have changed after init, let any clients know via a change event */
578     if (s->state != PA_SINK_INIT && flags != s->flags)
579         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
580 }
581 
582 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_get_mute_cb_t cb) {
583     pa_assert(s);
584 
585     s->get_mute = cb;
586 }
587 
588 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
589     pa_sink_flags_t flags;
590 
591     pa_assert(s);
592 
593     s->set_mute = cb;
594 
595     /* Save the current flags so we can tell if they've changed */
596     flags = s->flags;
597 
598     if (cb)
599         s->flags |= PA_SINK_HW_MUTE_CTRL;
600     else
601         s->flags &= ~PA_SINK_HW_MUTE_CTRL;
602 
603     /* If the flags have changed after init, let any clients know via a change event */
604     if (s->state != PA_SINK_INIT && flags != s->flags)
605         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
606 }
607 
608 static void enable_flat_volume(pa_sink *s, bool enable) {
609     pa_sink_flags_t flags;
610 
611     pa_assert(s);
612 
613     /* Always follow the overall user preference here */
614     enable = enable && s->core->flat_volumes;
615 
616     /* Save the current flags so we can tell if they've changed */
617     flags = s->flags;
618 
619     if (enable)
620         s->flags |= PA_SINK_FLAT_VOLUME;
621     else
622         s->flags &= ~PA_SINK_FLAT_VOLUME;
623 
624     /* If the flags have changed after init, let any clients know via a change event */
625     if (s->state != PA_SINK_INIT && flags != s->flags)
626         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
627 }
628 
629 void pa_sink_enable_decibel_volume(pa_sink *s, bool enable) {
630     pa_sink_flags_t flags;
631 
632     pa_assert(s);
633 
634     /* Save the current flags so we can tell if they've changed */
635     flags = s->flags;
636 
637     if (enable) {
638         s->flags |= PA_SINK_DECIBEL_VOLUME;
639         enable_flat_volume(s, true);
640     } else {
641         s->flags &= ~PA_SINK_DECIBEL_VOLUME;
642         enable_flat_volume(s, false);
643     }
644 
645     /* If the flags have changed after init, let any clients know via a change event */
646     if (s->state != PA_SINK_INIT && flags != s->flags)
647         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
648 }
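/*
 * Summary of the flag/callback relationships maintained by the setters
 * above (derived from the code, for orientation):
 *
 *     set_volume   set  <=>  PA_SINK_HW_VOLUME_CTRL
 *     write_volume set  <=>  PA_SINK_DEFERRED_VOLUME
 *     set_mute     set  <=>  PA_SINK_HW_MUTE_CTRL
 *
 * PA_SINK_DECIBEL_VOLUME and PA_SINK_FLAT_VOLUME are managed via
 * pa_sink_enable_decibel_volume() and enable_flat_volume(), the latter also
 * honoring the core-wide flat_volumes preference.
 */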
649 
650 /* Called from main context */
651 void pa_sink_put(pa_sink* s) {
652     pa_sink_assert_ref(s);
653     pa_assert_ctl_context();
654 
655     pa_assert(s->state == PA_SINK_INIT);
656     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || pa_sink_is_filter(s));
657 
658     /* The following fields must be initialized properly when calling _put() */
659     pa_assert(s->asyncmsgq);
660     pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
661 
662     /* Generally, flags should be initialized via pa_sink_new(). As a
663      * special exception we allow some volume related flags to be set
664      * between _new() and _put() by the callback setter functions above.
665      *
666      * Thus we implement a couple of safeguards here which ensure the above
667      * setters were used (or at least the implementor made manual changes
668      * in a compatible way).
669      *
670      * Note: All of these flags set here can change over the life time
671      * of the sink. */
672     pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
673     pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
674     pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
675 
676     /* XXX: Currently decibel volume is disabled for all sinks that use volume
677      * sharing. When the master sink supports decibel volume, it would be good
678      * to have the flag also in the filter sink, but currently we don't do that
679      * so that the flags of the filter sink never change when it's moved from
680      * a master sink to another. One solution for this problem would be to
681      * remove user-visible volume altogether from filter sinks when volume
682      * sharing is used, but the current approach was easier to implement... */
683     /* We always support decibel volumes in software, otherwise we leave it to
684      * the sink implementor to set this flag as needed.
685      *
686      * Note: This flag can also change over the life time of the sink. */
687     if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
688         pa_sink_enable_decibel_volume(s, true);
689         s->soft_volume = s->reference_volume;
690     }
691 
692     /* If the sink implementor supports dB volumes by itself, we should always
693      * try to enable flat volumes too */
694     if ((s->flags & PA_SINK_DECIBEL_VOLUME))
695         enable_flat_volume(s, true);
696 
697     if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
698         pa_sink *root_sink = pa_sink_get_master(s);
699 
700         pa_assert(root_sink);
701 
702         s->reference_volume = root_sink->reference_volume;
703         pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
704 
705         s->real_volume = root_sink->real_volume;
706         pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
707     } else
708         /* We assume that if the sink implementor changed the default
709          * volume they did so in real_volume, because that is the usual
710          * place where they are supposed to place their changes.  */
711         s->reference_volume = s->real_volume;
712 
713     s->thread_info.soft_volume = s->soft_volume;
714     s->thread_info.soft_muted = s->muted;
715     pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
716 
717     pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
718               || (s->base_volume == PA_VOLUME_NORM
719                   && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
720     pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
721     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->thread_info.fixed_latency == 0));
722     pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
723     pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
724 
725     pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
726     pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
727     pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
728 
729     if (s->suspend_cause)
730         pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED, s->suspend_cause) == 0);
731     else
732         pa_assert_se(sink_set_state(s, PA_SINK_IDLE, 0) == 0);
733 
734     pa_source_put(s->monitor_source);
735 
736     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
737     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
738 
739     /* It's good to fire the SINK_PUT hook before updating the default sink,
740      * because module-switch-on-connect will set the new sink as the default
741      * sink, and if we were to call pa_core_update_default_sink() before that,
742      * the default sink might change twice, causing unnecessary stream moving. */
743 
744     pa_core_update_default_sink(s->core);
745 
746     pa_core_move_streams_to_newly_available_preferred_sink(s->core, s);
747 }
748 
749 /* Called from main context */
750 void pa_sink_unlink(pa_sink* s) {
751     bool linked;
752     pa_sink_input *i, PA_UNUSED *j = NULL;
753 
754     pa_sink_assert_ref(s);
755     pa_assert_ctl_context();
756 
757     /* Please note that pa_sink_unlink() does more than simply
758      * reversing pa_sink_put(). It also undoes the registrations
759      * already done in pa_sink_new()! */
760 
761     if (s->unlink_requested)
762         return;
763 
764     s->unlink_requested = true;
765 
766     linked = PA_SINK_IS_LINKED(s->state);
767 
768     if (linked)
769         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
770 
771     if (s->state != PA_SINK_UNLINKED)
772         pa_namereg_unregister(s->core, s->name);
773     pa_idxset_remove_by_data(s->core->sinks, s, NULL);
774 
775     pa_core_update_default_sink(s->core);
776 
777     if (linked && s->core->rescue_streams)
778         pa_sink_move_streams_to_default_sink(s->core, s, false);
779 
780     if (s->card)
781         pa_idxset_remove_by_data(s->card->sinks, s, NULL);
782 
783     while ((i = pa_idxset_first(s->inputs, NULL))) {
784         pa_assert(i != j);
785         pa_sink_input_kill(i);
786         j = i;
787     }
788 
789     /* Unlink monitor source before unlinking the sink */
790     if (s->monitor_source)
791         pa_source_unlink(s->monitor_source);
792 
793     if (linked)
794         /* It's important to keep the suspend cause unchanged when unlinking,
795          * because if we remove the SESSION suspend cause here, the alsa sink
796          * will sync its volume with the hardware while another user is
797          * active, messing up the volume for that other user. */
798         sink_set_state(s, PA_SINK_UNLINKED, s->suspend_cause);
799     else
800         s->state = PA_SINK_UNLINKED;
801 
802     reset_callbacks(s);
803 
804     if (linked) {
805         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
806         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
807     }
808 }
809 
810 /* Called from main context */
811 static void sink_free(pa_object *o) {
812     pa_sink *s = PA_SINK(o);
813 
814     pa_assert(s);
815     pa_assert_ctl_context();
816     pa_assert(pa_sink_refcnt(s) == 0);
817     pa_assert(!PA_SINK_IS_LINKED(s->state));
818 
819     pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
820 
821     pa_sink_volume_change_flush(s);
822 
823     if (s->monitor_source) {
824         pa_source_unref(s->monitor_source);
825         s->monitor_source = NULL;
826     }
827 
828     pa_idxset_free(s->inputs, NULL);
829     pa_hashmap_free(s->thread_info.inputs);
830 
831     if (s->silence.memblock)
832         pa_memblock_unref(s->silence.memblock);
833 
834     pa_xfree(s->name);
835     pa_xfree(s->driver);
836 
837     if (s->proplist)
838         pa_proplist_free(s->proplist);
839 
840     if (s->ports)
841         pa_hashmap_free(s->ports);
842 
843     pa_xfree(s);
844 }
845 
846 /* Called from main context, and not while the IO thread is active, please */
847 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
848     pa_sink_assert_ref(s);
849     pa_assert_ctl_context();
850 
851     s->asyncmsgq = q;
852 
853     if (s->monitor_source)
854         pa_source_set_asyncmsgq(s->monitor_source, q);
855 }
856 
857 /* Called from main context, and not while the IO thread is active, please */
858 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
859     pa_sink_flags_t old_flags;
860     pa_sink_input *input;
861     uint32_t idx;
862 
863     pa_sink_assert_ref(s);
864     pa_assert_ctl_context();
865 
866     /* For now, allow only a minimal set of flags to be changed. */
867     pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
868 
869     old_flags = s->flags;
870     s->flags = (s->flags & ~mask) | (value & mask);
871 
872     if (s->flags == old_flags)
873         return;
874 
875     if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
876         pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");
877 
878     if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
879         pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
880                      s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");
881 
882     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
883     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);
884 
885     if (s->monitor_source)
886         pa_source_update_flags(s->monitor_source,
887                                ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
888                                ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
889                                ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
890                                ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
891 
892     PA_IDXSET_FOREACH(input, s->inputs, idx) {
893         if (input->origin_sink)
894             pa_sink_update_flags(input->origin_sink, mask, value);
895     }
896 }
897 
898 /* Called from IO context, or before _put() from main context */
899 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
900     pa_sink_assert_ref(s);
901     pa_sink_assert_io_context(s);
902 
903     s->thread_info.rtpoll = p;
904 
905     if (s->monitor_source)
906         pa_source_set_rtpoll(s->monitor_source, p);
907 }
908 
909 /* Called from main context */
910 int pa_sink_update_status(pa_sink*s) {
911     pa_sink_assert_ref(s);
912     pa_assert_ctl_context();
913     pa_assert(PA_SINK_IS_LINKED(s->state));
914 
915     if (s->state == PA_SINK_SUSPENDED)
916         return 0;
917 
918     return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
919 }
920 
921 /* Called from main context */
922 int pa_sink_suspend(pa_sink *s, bool suspend, pa_suspend_cause_t cause) {
923     pa_suspend_cause_t merged_cause;
924 
925     pa_sink_assert_ref(s);
926     pa_assert_ctl_context();
927     pa_assert(PA_SINK_IS_LINKED(s->state));
928     pa_assert(cause != 0);
929 
930     if (suspend)
931         merged_cause = s->suspend_cause | cause;
932     else
933         merged_cause = s->suspend_cause & ~cause;
934 
935     if (merged_cause)
936         return sink_set_state(s, PA_SINK_SUSPENDED, merged_cause);
937     else
938         return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE, 0);
939 }
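/*
 * Usage sketch (illustrative): a module that suspends a sink on behalf of
 * the user and later resumes it would typically call
 *
 *     pa_sink_suspend(s, true, PA_SUSPEND_USER);
 *     ...
 *     pa_sink_suspend(s, false, PA_SUSPEND_USER);
 *
 * Because the causes are merged bit-wise above, the sink only actually
 * resumes once every suspend cause has been cleared.
 */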
940 
941 /* Called from main context */
942 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
943     pa_sink_input *i, *n;
944     uint32_t idx;
945 
946     pa_sink_assert_ref(s);
947     pa_assert_ctl_context();
948     pa_assert(PA_SINK_IS_LINKED(s->state));
949 
950     if (!q)
951         q = pa_queue_new();
952 
953     for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
954         n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
955 
956         pa_sink_input_ref(i);
957 
958         if (pa_sink_input_start_move(i) >= 0)
959             pa_queue_push(q, i);
960         else
961             pa_sink_input_unref(i);
962     }
963 
964     return q;
965 }
966 
967 /* Called from main context */
968 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, bool save) {
969     pa_sink_input *i;
970 
971     pa_sink_assert_ref(s);
972     pa_assert_ctl_context();
973     pa_assert(PA_SINK_IS_LINKED(s->state));
974     pa_assert(q);
975 
976     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
977         if (PA_SINK_INPUT_IS_LINKED(i->state)) {
978             if (pa_sink_input_finish_move(i, s, save) < 0)
979                 pa_sink_input_fail_move(i);
980 
981         }
982         pa_sink_input_unref(i);
983     }
984 
985     pa_queue_free(q, NULL);
986 }
987 
988 /* Called from main context */
989 void pa_sink_move_all_fail(pa_queue *q) {
990     pa_sink_input *i;
991 
992     pa_assert_ctl_context();
993     pa_assert(q);
994 
995     while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
996         pa_sink_input_fail_move(i);
997         pa_sink_input_unref(i);
998     }
999 
1000     pa_queue_free(q, NULL);
1001 }
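/*
 * The three pa_sink_move_all_*() functions above are meant to be used as a
 * unit: start the moves while the old sink is still usable, then either
 * finish them on a new sink or fail them, e.g. (illustrative):
 *
 *     pa_queue *q = pa_sink_move_all_start(old_sink, NULL);
 *     if (new_sink)
 *         pa_sink_move_all_finish(new_sink, q, false);
 *     else
 *         pa_sink_move_all_fail(q);
 */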
1002 
1003 /* Called from IO thread context */
1004 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
1005     pa_sink_input *i;
1006     void *state = NULL;
1007     size_t result = 0;
1008 
1009     pa_sink_assert_ref(s);
1010     pa_sink_assert_io_context(s);
1011 
1012     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1013         size_t uf = i->thread_info.underrun_for_sink;
1014 
1015         /* Propagate down the filter tree */
1016         if (i->origin_sink) {
1017             size_t filter_result, left_to_play_origin;
1018 
1019             /* The combine sink sets i->origin_sink but has a different threading model
1020              * than the filter sinks. Therefore the recursion below may not be executed
1021              * because pa_sink_process_input_underruns() was not called in the thread
1022              * context of the origin sink.
1023              * FIXME: It is unclear if some other kind of recursion would be necessary
1024              * for the combine sink. */
1025             if (!i->module || !pa_safe_streq(i->module->name, "module-combine-sink")) {
1026 
1027                 /* The recursive call works in the origin sink domain ... */
1028                 left_to_play_origin = pa_convert_size(left_to_play, &i->sink->sample_spec, &i->origin_sink->sample_spec);
1029 
1030                 /* .. and returns the time to sleep before waking up. We need the
1031                  * underrun duration for comparisons, so we undo the subtraction on
1032                  * the return value... */
1033                 filter_result = left_to_play_origin - pa_sink_process_input_underruns(i->origin_sink, left_to_play_origin);
1034 
1035                 /* ... and convert it back to the master sink domain */
1036                 filter_result = pa_convert_size(filter_result, &i->origin_sink->sample_spec, &i->sink->sample_spec);
1037 
1038                 /* Remember the longest underrun so far */
1039                 if (filter_result > result)
1040                     result = filter_result;
1041             }
1042         }
1043 
1044         if (uf == 0) {
1045             /* No underrun here, move on */
1046             continue;
1047         } else if (uf >= left_to_play) {
1048             /* The sink has possibly consumed all the data the sink input provided */
1049             pa_sink_input_process_underrun(i);
1050         } else if (uf > result) {
1051             /* Remember the longest underrun so far */
1052             result = uf;
1053         }
1054     }
1055 
1056     if (result > 0)
1057         pa_log_debug("%s: Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", s->name,
1058                 (long) result, (long) left_to_play - result);
1059     return left_to_play - result;
1060 }
1061 
1062 /* Called from IO thread context */
1063 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
1064     pa_sink_input *i;
1065     void *state = NULL;
1066 
1067     pa_sink_assert_ref(s);
1068     pa_sink_assert_io_context(s);
1069     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1070 
1071     /* If nobody requested this and this is actually no real rewind,
1072      * then we can short-cut this. Please note that this means that
1073      * not all rewind requests triggered upstream will always be
1074      * translated into actual requests! */
1075     if (!s->thread_info.rewind_requested && nbytes <= 0)
1076         return;
1077 
1078     s->thread_info.rewind_nbytes = 0;
1079     s->thread_info.rewind_requested = false;
1080 
1081     if (nbytes > 0) {
1082         pa_log_debug("Processing rewind...");
1083         if (s->flags & PA_SINK_DEFERRED_VOLUME)
1084             pa_sink_volume_change_rewind(s, nbytes);
1085     }
1086 
1087     /* Save rewind value */
1088     s->thread_info.last_rewind_nbytes = nbytes;
1089 
1090     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1091         pa_sink_input_assert_ref(i);
1092         pa_sink_input_process_rewind(i, nbytes);
1093     }
1094 
1095     if (nbytes > 0) {
1096         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1097             pa_source_process_rewind(s->monitor_source, nbytes);
1098     }
1099 }
1100 
1101 /* Called from IO thread context */
1102 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
1103     pa_sink_input *i;
1104     unsigned n = 0;
1105     void *state = NULL;
1106     size_t mixlength = *length;
1107 
1108     pa_sink_assert_ref(s);
1109     pa_sink_assert_io_context(s);
1110     pa_assert(info);
1111 
1112     while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
1113         pa_sink_input_assert_ref(i);
1114 
1115         pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
1116 
1117         if (mixlength == 0 || info->chunk.length < mixlength)
1118             mixlength = info->chunk.length;
1119 
1120         if (pa_memblock_is_silence(info->chunk.memblock)) {
1121             pa_memblock_unref(info->chunk.memblock);
1122             continue;
1123         }
1124 
1125         info->userdata = pa_sink_input_ref(i);
1126 
1127         pa_assert(info->chunk.memblock);
1128         pa_assert(info->chunk.length > 0);
1129 
1130         info++;
1131         n++;
1132         maxinfo--;
1133     }
1134 
1135     if (mixlength > 0)
1136         *length = mixlength;
1137 
1138     return n;
1139 }
1140 
1141 /* Called from IO thread context */
1142 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
1143     pa_sink_input *i;
1144     void *state;
1145     unsigned p = 0;
1146     unsigned n_unreffed = 0;
1147 
1148     pa_sink_assert_ref(s);
1149     pa_sink_assert_io_context(s);
1150     pa_assert(result);
1151     pa_assert(result->memblock);
1152     pa_assert(result->length > 0);
1153 
1154     /* We optimize for the case where the order of the inputs has not changed */
1155 
1156     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1157         unsigned j;
1158         pa_mix_info* m = NULL;
1159 
1160         pa_sink_input_assert_ref(i);
1161 
1162         /* Let's try to find the matching entry in the pa_mix_info array */
1163         for (j = 0; j < n; j ++) {
1164 
1165             if (info[p].userdata == i) {
1166                 m = info + p;
1167                 break;
1168             }
1169 
1170             p++;
1171             if (p >= n)
1172                 p = 0;
1173         }
1174 
1175         /* Drop read data */
1176         pa_sink_input_drop(i, result->length);
1177 
1178         if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
1179 
1180             if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
1181                 void *ostate = NULL;
1182                 pa_source_output *o;
1183                 pa_memchunk c;
1184 
1185                 if (m && m->chunk.memblock) {
1186                     c = m->chunk;
1187                     pa_memblock_ref(c.memblock);
1188                     pa_assert(result->length <= c.length);
1189                     c.length = result->length;
1190 
1191                     pa_memchunk_make_writable(&c, 0);
1192                     pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
1193                 } else {
1194                     c = s->silence;
1195                     pa_memblock_ref(c.memblock);
1196                     pa_assert(result->length <= c.length);
1197                     c.length = result->length;
1198                 }
1199 
1200                 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
1201                     pa_source_output_assert_ref(o);
1202                     pa_assert(o->direct_on_input == i);
1203                     pa_source_post_direct(s->monitor_source, o, &c);
1204                 }
1205 
1206                 pa_memblock_unref(c.memblock);
1207             }
1208         }
1209 
1210         if (m) {
1211             if (m->chunk.memblock) {
1212                 pa_memblock_unref(m->chunk.memblock);
1213                 pa_memchunk_reset(&m->chunk);
1214             }
1215 
1216             pa_sink_input_unref(m->userdata);
1217             m->userdata = NULL;
1218 
1219             n_unreffed += 1;
1220         }
1221     }
1222 
1223     /* Now drop references to entries that are included in the
1224      * pa_mix_info array but don't exist anymore */
1225 
1226     if (n_unreffed < n) {
1227         for (; n > 0; info++, n--) {
1228             if (info->userdata)
1229                 pa_sink_input_unref(info->userdata);
1230             if (info->chunk.memblock)
1231                 pa_memblock_unref(info->chunk.memblock);
1232         }
1233     }
1234 
1235     if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
1236         pa_source_post(s->monitor_source, result);
1237 }
1238 
1239 /* Called from IO thread context */
1240 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1241     pa_mix_info info[MAX_MIX_CHANNELS];
1242     unsigned n;
1243     size_t block_size_max;
1244 
1245     pa_sink_assert_ref(s);
1246     pa_sink_assert_io_context(s);
1247     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1248     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1249     pa_assert(result);
1250 
1251     pa_assert(!s->thread_info.rewind_requested);
1252     pa_assert(s->thread_info.rewind_nbytes == 0);
1253 
1254     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1255         result->memblock = pa_memblock_ref(s->silence.memblock);
1256         result->index = s->silence.index;
1257         result->length = PA_MIN(s->silence.length, length);
1258         return;
1259     }
1260 
1261     pa_sink_ref(s);
1262 
1263     if (length <= 0)
1264         length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1265 
1266     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1267     if (length > block_size_max)
1268         length = pa_frame_align(block_size_max, &s->sample_spec);
1269 
1270     pa_assert(length > 0);
1271 
1272     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1273 
1274     if (n == 0) {
1275 
1276         *result = s->silence;
1277         pa_memblock_ref(result->memblock);
1278 
1279         if (result->length > length)
1280             result->length = length;
1281 
1282     } else if (n == 1) {
1283         pa_cvolume volume;
1284 
1285         *result = info[0].chunk;
1286         pa_memblock_ref(result->memblock);
1287 
1288         if (result->length > length)
1289             result->length = length;
1290 
1291         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1292 
1293         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1294             pa_memblock_unref(result->memblock);
1295             pa_silence_memchunk_get(&s->core->silence_cache,
1296                                     s->core->mempool,
1297                                     result,
1298                                     &s->sample_spec,
1299                                     result->length);
1300         } else if (!pa_cvolume_is_norm(&volume)) {
1301             pa_memchunk_make_writable(result, 0);
1302             pa_volume_memchunk(result, &s->sample_spec, &volume);
1303         }
1304     } else {
1305         void *ptr;
1306         result->memblock = pa_memblock_new(s->core->mempool, length);
1307 
1308         ptr = pa_memblock_acquire(result->memblock);
1309         result->length = pa_mix(info, n,
1310                                 ptr, length,
1311                                 &s->sample_spec,
1312                                 &s->thread_info.soft_volume,
1313                                 s->thread_info.soft_muted);
1314         pa_memblock_release(result->memblock);
1315 
1316         result->index = 0;
1317     }
1318 
1319     inputs_drop(s, info, n, result);
1320 
1321     pa_sink_unref(s);
1322 }
1323 
1324 /* Called from IO thread context */
1325 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
1326     pa_mix_info info[MAX_MIX_CHANNELS];
1327     unsigned n;
1328     size_t length, block_size_max;
1329 
1330     pa_sink_assert_ref(s);
1331     pa_sink_assert_io_context(s);
1332     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1333     pa_assert(target);
1334     pa_assert(target->memblock);
1335     pa_assert(target->length > 0);
1336     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1337 
1338     pa_assert(!s->thread_info.rewind_requested);
1339     pa_assert(s->thread_info.rewind_nbytes == 0);
1340 
1341     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1342         pa_silence_memchunk(target, &s->sample_spec);
1343         return;
1344     }
1345 
1346     pa_sink_ref(s);
1347 
1348     length = target->length;
1349     block_size_max = pa_mempool_block_size_max(s->core->mempool);
1350     if (length > block_size_max)
1351         length = pa_frame_align(block_size_max, &s->sample_spec);
1352 
1353     pa_assert(length > 0);
1354 
1355     n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1356 
1357     if (n == 0) {
1358         if (target->length > length)
1359             target->length = length;
1360 
1361         pa_silence_memchunk(target, &s->sample_spec);
1362     } else if (n == 1) {
1363         pa_cvolume volume;
1364 
1365         if (target->length > length)
1366             target->length = length;
1367 
1368         pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1369 
1370         if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1371             pa_silence_memchunk(target, &s->sample_spec);
1372         else {
1373             pa_memchunk vchunk;
1374 
1375             vchunk = info[0].chunk;
1376             pa_memblock_ref(vchunk.memblock);
1377 
1378             if (vchunk.length > length)
1379                 vchunk.length = length;
1380 
1381             if (!pa_cvolume_is_norm(&volume)) {
1382                 pa_memchunk_make_writable(&vchunk, 0);
1383                 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1384             }
1385 
1386             pa_memchunk_memcpy(target, &vchunk);
1387             pa_memblock_unref(vchunk.memblock);
1388         }
1389 
1390     } else {
1391         void *ptr;
1392 
1393         ptr = pa_memblock_acquire(target->memblock);
1394 
1395         target->length = pa_mix(info, n,
1396                                 (uint8_t*) ptr + target->index, length,
1397                                 &s->sample_spec,
1398                                 &s->thread_info.soft_volume,
1399                                 s->thread_info.soft_muted);
1400 
1401         pa_memblock_release(target->memblock);
1402     }
1403 
1404     inputs_drop(s, info, n, target);
1405 
1406     pa_sink_unref(s);
1407 }
1408 
1409 /* Called from IO thread context */
1410 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1411     pa_memchunk chunk;
1412     size_t l, d;
1413 
1414     pa_sink_assert_ref(s);
1415     pa_sink_assert_io_context(s);
1416     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1417     pa_assert(target);
1418     pa_assert(target->memblock);
1419     pa_assert(target->length > 0);
1420     pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1421 
1422     pa_assert(!s->thread_info.rewind_requested);
1423     pa_assert(s->thread_info.rewind_nbytes == 0);
1424 
1425     if (s->thread_info.state == PA_SINK_SUSPENDED) {
1426         pa_silence_memchunk(target, &s->sample_spec);
1427         return;
1428     }
1429 
1430     pa_sink_ref(s);
1431 
1432     l = target->length;
1433     d = 0;
1434     while (l > 0) {
1435         chunk = *target;
1436         chunk.index += d;
1437         chunk.length -= d;
1438 
1439         pa_sink_render_into(s, &chunk);
1440 
1441         d += chunk.length;
1442         l -= chunk.length;
1443     }
1444 
1445     pa_sink_unref(s);
1446 }
1447 
1448 /* Called from IO thread context */
1449 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1450     pa_sink_assert_ref(s);
1451     pa_sink_assert_io_context(s);
1452     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1453     pa_assert(length > 0);
1454     pa_assert(pa_frame_aligned(length, &s->sample_spec));
1455     pa_assert(result);
1456 
1457     pa_assert(!s->thread_info.rewind_requested);
1458     pa_assert(s->thread_info.rewind_nbytes == 0);
1459 
1460     pa_sink_ref(s);
1461 
1462     pa_sink_render(s, length, result);
1463 
1464     if (result->length < length) {
1465         pa_memchunk chunk;
1466 
1467         pa_memchunk_make_writable(result, length);
1468 
1469         chunk.memblock = result->memblock;
1470         chunk.index = result->index + result->length;
1471         chunk.length = length - result->length;
1472 
1473         pa_sink_render_into_full(s, &chunk);
1474 
1475         result->length = length;
1476     }
1477 
1478     pa_sink_unref(s);
1479 }
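
/* Illustrative sketch (not part of the original file): one way a sink
 * implementation's IO loop might call pa_sink_render_full() to obtain exactly
 * one fragment of mixed audio. The struct userdata, u->sink and
 * write_to_device() names are hypothetical placeholders. */
#if 0
static void render_one_fragment(struct userdata *u, size_t fragment_size) {
    pa_memchunk chunk;
    void *p;

    /* Ask for exactly fragment_size bytes; if pa_sink_render() returns less,
     * pa_sink_render_full() tops the chunk up internally via
     * pa_sink_render_into_full(). */
    pa_sink_render_full(u->sink, fragment_size, &chunk);

    p = pa_memblock_acquire(chunk.memblock);
    write_to_device(u, (const uint8_t *) p + chunk.index, chunk.length);
    pa_memblock_release(chunk.memblock);

    pa_memblock_unref(chunk.memblock);
}
#endif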
1480 
1481 /* Called from main thread */
1482 void pa_sink_reconfigure(pa_sink *s, pa_sample_spec *spec, bool passthrough) {
1483     pa_sample_spec desired_spec;
1484     uint32_t default_rate = s->default_sample_rate;
1485     uint32_t alternate_rate = s->alternate_sample_rate;
1486     uint32_t idx;
1487     pa_sink_input *i;
1488     bool default_rate_is_usable = false;
1489     bool alternate_rate_is_usable = false;
1490     bool avoid_resampling = s->avoid_resampling;
1491 
1492     if (pa_sample_spec_equal(spec, &s->sample_spec))
1493         return;
1494 
1495     if (!s->reconfigure)
1496         return;
1497 
1498     if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough && !avoid_resampling)) {
1499         pa_log_debug("Default and alternate sample rates are the same, so there is no point in switching.");
1500         return;
1501     }
1502 
1503     if (PA_SINK_IS_RUNNING(s->state)) {
1504         pa_log_info("Cannot update sample spec, SINK_IS_RUNNING, will keep using %s and %u Hz",
1505                     pa_sample_format_to_string(s->sample_spec.format), s->sample_spec.rate);
1506         return;
1507     }
1508 
1509     if (s->monitor_source) {
1510         if (PA_SOURCE_IS_RUNNING(s->monitor_source->state)) {
1511             pa_log_info("Cannot update sample spec, monitor source is RUNNING");
1512             return;
1513         }
1514     }
1515 
1516     if (PA_UNLIKELY(!pa_sample_spec_valid(spec)))
1517         return;
1518 
1519     desired_spec = s->sample_spec;
1520 
1521     if (passthrough) {
1522         /* We have to try to use the sink input format and rate */
1523         desired_spec.format = spec->format;
1524         desired_spec.rate = spec->rate;
1525 
1526     } else if (avoid_resampling) {
1527         /* We just try to set the sink input's sample rate if it's not too low */
1528         if (spec->rate >= default_rate || spec->rate >= alternate_rate)
1529             desired_spec.rate = spec->rate;
1530         desired_spec.format = spec->format;
1531 
1532     } else if (default_rate == spec->rate || alternate_rate == spec->rate) {
1533         /* We can directly try to use this rate */
1534         desired_spec.rate = spec->rate;
1535 
1536     }
1537 
1538     if (desired_spec.rate != spec->rate) {
1539         /* See if we can pick a rate that results in less resampling effort */
1540         if (default_rate % 11025 == 0 && spec->rate % 11025 == 0)
1541             default_rate_is_usable = true;
1542         if (default_rate % 4000 == 0 && spec->rate % 4000 == 0)
1543             default_rate_is_usable = true;
1544         if (alternate_rate % 11025 == 0 && spec->rate % 11025 == 0)
1545             alternate_rate_is_usable = true;
1546         if (alternate_rate % 4000 == 0 && spec->rate % 4000 == 0)
1547             alternate_rate_is_usable = true;
1548 
1549         if (alternate_rate_is_usable && !default_rate_is_usable)
1550             desired_spec.rate = alternate_rate;
1551         else
1552             desired_spec.rate = default_rate;
1553     }
1554 
1555     if (pa_sample_spec_equal(&desired_spec, &s->sample_spec) && passthrough == pa_sink_is_passthrough(s))
1556         return;
1557 
1558     if (!passthrough && pa_sink_used_by(s) > 0)
1559         return;
1560 
1561     pa_log_debug("Suspending sink %s due to changing format, desired format = %s rate = %u",
1562                  s->name, pa_sample_format_to_string(desired_spec.format), desired_spec.rate);
1563     pa_sink_suspend(s, true, PA_SUSPEND_INTERNAL);
1564 
1565     s->reconfigure(s, &desired_spec, passthrough);
1566 
1567     /* update monitor source as well */
1568     if (s->monitor_source && !passthrough)
1569         pa_source_reconfigure(s->monitor_source, &s->sample_spec, false);
1570     pa_log_info("Reconfigured successfully");
1571 
1572     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1573         if (i->state == PA_SINK_INPUT_CORKED)
1574             pa_sink_input_update_resampler(i, true);
1575     }
1576 
1577     pa_sink_suspend(s, false, PA_SUSPEND_INTERNAL);
1578 }
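
/* Worked example of the rate-selection heuristic above (illustrative values,
 * not from the original file): with default_sample_rate = 48000 Hz,
 * alternate_sample_rate = 44100 Hz and a stream at 22050 Hz, the default rate
 * is not usable (48000 is not a multiple of 11025, and 22050 is not a
 * multiple of 4000), while the alternate rate is (both 44100 and 22050 are
 * multiples of 11025), so the sink is reconfigured to 44100 Hz to keep the
 * resampling ratio simple. */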
1579 
1580 /* Called from main thread */
1581 size_t pa_sink_get_last_rewind(pa_sink *s) {
1582     size_t rewind_bytes;
1583 
1584     pa_sink_assert_ref(s);
1585     pa_assert_ctl_context();
1586     pa_assert(PA_SINK_IS_LINKED(s->state));
1587 
1588     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LAST_REWIND, &rewind_bytes, 0, NULL) == 0);
1589 
1590     return rewind_bytes;
1591 }
1592 
1593 /* Called from main thread */
1594 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1595     int64_t usec = 0;
1596 
1597     pa_sink_assert_ref(s);
1598     pa_assert_ctl_context();
1599     pa_assert(PA_SINK_IS_LINKED(s->state));
1600 
1601     /* The returned value is supposed to be in the time domain of the sound card! */
1602 
1603     if (s->state == PA_SINK_SUSPENDED)
1604         return 0;
1605 
1606     if (!(s->flags & PA_SINK_LATENCY))
1607         return 0;
1608 
1609     if (s->asyncmsgq == NULL) {
1610         pa_log_error("pa_asyncmsgq is NULL");
1611         return 0;
1612     }
1613     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1614 
1615     /* The return value is unsigned, so check that the offset can be added to usec without
1616      * underflowing. */
1617     if (-s->port_latency_offset <= usec)
1618         usec += s->port_latency_offset;
1619     else
1620         usec = 0;
1621 
1622     return (pa_usec_t)usec;
1623 }
1624 
1625 /* Called from IO thread */
1626 int64_t pa_sink_get_latency_within_thread(pa_sink *s, bool allow_negative) {
1627     int64_t usec = 0;
1628     pa_msgobject *o;
1629 
1630     pa_sink_assert_ref(s);
1631     pa_sink_assert_io_context(s);
1632     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1633 
1634     /* The returned value is supposed to be in the time domain of the sound card! */
1635 
1636     if (s->thread_info.state == PA_SINK_SUSPENDED)
1637         return 0;
1638 
1639     if (!(s->flags & PA_SINK_LATENCY))
1640         return 0;
1641 
1642     o = PA_MSGOBJECT(s);
1643 
1644     /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1645 
1646     o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL);
1647 
1648     /* If allow_negative is false, the call must only return non-negative values. */
1649     usec += s->thread_info.port_latency_offset;
1650     if (!allow_negative && usec < 0)
1651         usec = 0;
1652 
1653     return usec;
1654 }
1655 
1656 /* Called from the main thread (and also from the IO thread while the main
1657  * thread is waiting).
1658  *
1659  * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1660  * set. Instead, flat volume mode is detected by checking whether the root sink
1661  * has the flag set. */
1662 bool pa_sink_flat_volume_enabled(pa_sink *s) {
1663     pa_sink_assert_ref(s);
1664 
1665     s = pa_sink_get_master(s);
1666 
1667     if (PA_LIKELY(s))
1668         return (s->flags & PA_SINK_FLAT_VOLUME);
1669     else
1670         return false;
1671 }
1672 
1673 /* Check if the sink has a virtual sink attached.
1674  * Called from the IO thread. */
1675 bool pa_sink_has_filter_attached(pa_sink *s) {
1676     bool vsink_attached = false;
1677     void *state = NULL;
1678     pa_sink_input *i;
1679 
1680     pa_assert(s);
1681 
1682     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1683         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1684             if (!i->origin_sink)
1685                 continue;
1686 
1687             vsink_attached = true;
1688             break;
1689         }
1690     }
1691     return vsink_attached;
1692 }
1693 
1694 /* Called from the main thread (and also from the IO thread while the main
1695  * thread is waiting). */
1696 pa_sink *pa_sink_get_master(pa_sink *s) {
1697     pa_sink_assert_ref(s);
1698 
1699     while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1700         if (PA_UNLIKELY(!s->input_to_master))
1701             return NULL;
1702 
1703         s = s->input_to_master->sink;
1704     }
1705 
1706     return s;
1707 }
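
/* Illustrative example (not part of the original file): for a filter chain
 * such as "ladspa sink -> echo-cancel sink -> ALSA sink", where both filter
 * sinks carry PA_SINK_SHARE_VOLUME_WITH_MASTER, pa_sink_get_master() called
 * on any sink in the chain walks the input_to_master links and returns the
 * ALSA sink; it returns NULL only if a volume-sharing sink is not (yet)
 * connected to a master. */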
1708 
1709 /* Called from main context */
1710 bool pa_sink_is_filter(pa_sink *s) {
1711     pa_sink_assert_ref(s);
1712 
1713     return (s->input_to_master != NULL);
1714 }
1715 
1716 /* Called from main context */
1717 bool pa_sink_is_passthrough(pa_sink *s) {
1718     pa_sink_input *alt_i;
1719     uint32_t idx;
1720 
1721     pa_sink_assert_ref(s);
1722 
1723     /* one and only one PASSTHROUGH input can possibly be connected */
1724     if (pa_idxset_size(s->inputs) == 1) {
1725         alt_i = pa_idxset_first(s->inputs, &idx);
1726 
1727         if (pa_sink_input_is_passthrough(alt_i))
1728             return true;
1729     }
1730 
1731     return false;
1732 }
1733 
1734 /* Called from main context */
1735 void pa_sink_enter_passthrough(pa_sink *s) {
1736     pa_cvolume volume;
1737 
1738     /* The sink implementation is reconfigured for passthrough in
1739      * pa_sink_reconfigure(). This function sets the PA core objects to
1740      * passthrough mode. */
1741 
1742     /* disable the monitor in passthrough mode */
1743     if (s->monitor_source) {
1744         pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1745         pa_source_suspend(s->monitor_source, true, PA_SUSPEND_PASSTHROUGH);
1746     }
1747 
1748     /* set the volume to NORM */
1749     s->saved_volume = *pa_sink_get_volume(s, true);
1750     s->saved_save_volume = s->save_volume;
1751 
1752     pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1753     pa_sink_set_volume(s, &volume, true, false);
1754 
1755     pa_log_debug("Suspending/Restarting sink %s to enter passthrough mode", s->name);
1756 }
1757 
1758 /* Called from main context */
1759 void pa_sink_leave_passthrough(pa_sink *s) {
1760     /* Unsuspend monitor */
1761     if (s->monitor_source) {
1762         pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1763         pa_source_suspend(s->monitor_source, false, PA_SUSPEND_PASSTHROUGH);
1764     }
1765 
1766     /* Restore sink volume to what it was before we entered passthrough mode */
1767     pa_sink_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1768 
1769     pa_cvolume_init(&s->saved_volume);
1770     s->saved_save_volume = false;
1771 
1772 }
1773 
1774 /* Called from main context. */
1775 static void compute_reference_ratio(pa_sink_input *i) {
1776     unsigned c = 0;
1777     pa_cvolume remapped;
1778     pa_cvolume ratio;
1779 
1780     pa_assert(i);
1781     pa_assert(pa_sink_flat_volume_enabled(i->sink));
1782 
1783     /*
1784      * Calculates the reference ratio from the sink's reference
1785      * volume. This basically calculates:
1786      *
1787      * i->reference_ratio = i->volume / i->sink->reference_volume
1788      */
1789 
1790     remapped = i->sink->reference_volume;
1791     pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1792 
1793     ratio = i->reference_ratio;
1794 
1795     for (c = 0; c < i->sample_spec.channels; c++) {
1796 
1797         /* We don't update when the sink volume is 0 anyway */
1798         if (remapped.values[c] <= PA_VOLUME_MUTED)
1799             continue;
1800 
1801         /* Don't update the reference ratio unless necessary */
1802         if (pa_sw_volume_multiply(
1803                     ratio.values[c],
1804                     remapped.values[c]) == i->volume.values[c])
1805             continue;
1806 
1807         ratio.values[c] = pa_sw_volume_divide(
1808                 i->volume.values[c],
1809                 remapped.values[c]);
1810     }
1811 
1812     pa_sink_input_set_reference_ratio(i, &ratio);
1813 }
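
/* Worked example (illustrative, not from the original file): software volume
 * multiplication and division correspond to adding and subtracting dB values,
 * so if the sink's reference volume is at 0 dB (PA_VOLUME_NORM) and a
 * stream's volume is at -6 dB, the computed reference ratio is roughly -6 dB.
 * Later, i->volume can be reconstructed as
 * s->reference_volume * i->reference_ratio (see
 * propagate_reference_volume()). */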
1814 
1815 /* Called from main context. Only called for the root sink in volume sharing
1816  * cases, except for internal recursive calls. */
1817 static void compute_reference_ratios(pa_sink *s) {
1818     uint32_t idx;
1819     pa_sink_input *i;
1820 
1821     pa_sink_assert_ref(s);
1822     pa_assert_ctl_context();
1823     pa_assert(PA_SINK_IS_LINKED(s->state));
1824     pa_assert(pa_sink_flat_volume_enabled(s));
1825 
1826     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1827         compute_reference_ratio(i);
1828 
1829         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1830                 && PA_SINK_IS_LINKED(i->origin_sink->state))
1831             compute_reference_ratios(i->origin_sink);
1832     }
1833 }
1834 
1835 /* Called from main context. Only called for the root sink in volume sharing
1836  * cases, except for internal recursive calls. */
1837 static void compute_real_ratios(pa_sink *s) {
1838     pa_sink_input *i;
1839     uint32_t idx;
1840 
1841     pa_sink_assert_ref(s);
1842     pa_assert_ctl_context();
1843     pa_assert(PA_SINK_IS_LINKED(s->state));
1844     pa_assert(pa_sink_flat_volume_enabled(s));
1845 
1846     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1847         unsigned c;
1848         pa_cvolume remapped;
1849 
1850         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1851             /* The origin sink uses volume sharing, so this input's real ratio
1852              * is handled as a special case - the real ratio must be 0 dB, and
1853              * as a result i->soft_volume must equal i->volume_factor. */
1854             pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
1855             i->soft_volume = i->volume_factor;
1856 
1857             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1858                 compute_real_ratios(i->origin_sink);
1859 
1860             continue;
1861         }
1862 
1863         /*
1864          * This basically calculates:
1865          *
1866          * i->real_ratio := i->volume / s->real_volume
1867          * i->soft_volume := i->real_ratio * i->volume_factor
1868          */
1869 
1870         remapped = s->real_volume;
1871         pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1872 
1873         i->real_ratio.channels = i->sample_spec.channels;
1874         i->soft_volume.channels = i->sample_spec.channels;
1875 
1876         for (c = 0; c < i->sample_spec.channels; c++) {
1877 
1878             if (remapped.values[c] <= PA_VOLUME_MUTED) {
1879                 /* We leave i->real_ratio untouched */
1880                 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1881                 continue;
1882             }
1883 
1884             /* Don't lose accuracy unless necessary */
1885             if (pa_sw_volume_multiply(
1886                         i->real_ratio.values[c],
1887                         remapped.values[c]) != i->volume.values[c])
1888 
1889                 i->real_ratio.values[c] = pa_sw_volume_divide(
1890                         i->volume.values[c],
1891                         remapped.values[c]);
1892 
1893             i->soft_volume.values[c] = pa_sw_volume_multiply(
1894                     i->real_ratio.values[c],
1895                     i->volume_factor.values[c]);
1896         }
1897 
1898         /* We don't copy the soft_volume to the thread_info data
1899          * here. That must be done by the caller */
1900     }
1901 }
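
/* Worked example (illustrative, not from the original file): if a stream's
 * volume is -10 dB while the sink's real (hardware) volume is -4 dB, the
 * stream's real ratio becomes roughly -6 dB; with a volume factor of 0 dB its
 * soft volume is therefore about -6 dB, i.e. the part of the requested
 * attenuation that the hardware does not already provide is applied in
 * software when the stream is mixed. */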
1902 
1903 static pa_cvolume *cvolume_remap_minimal_impact(
1904         pa_cvolume *v,
1905         const pa_cvolume *template,
1906         const pa_channel_map *from,
1907         const pa_channel_map *to) {
1908 
1909     pa_cvolume t;
1910 
1911     pa_assert(v);
1912     pa_assert(template);
1913     pa_assert(from);
1914     pa_assert(to);
1915     pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1916     pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1917 
1918     /* Much like pa_cvolume_remap(), but tries to minimize impact when
1919      * mapping from sink input to sink volumes:
1920      *
1921      * If template is a possible remapping from v it is used instead
1922      * of remapping anew.
1923      *
1924      * If the channel maps don't match we set an all-channel volume on
1925      * the sink to ensure that changing a volume on one stream has no
1926      * effect that cannot be compensated for in another stream that
1927      * does not have the same channel map as the sink. */
1928 
1929     if (pa_channel_map_equal(from, to))
1930         return v;
1931 
1932     t = *template;
1933     if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1934         *v = *template;
1935         return v;
1936     }
1937 
1938     pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1939     return v;
1940 }
1941 
1942 /* Called from main thread. Only called for the root sink in volume sharing
1943  * cases, except for internal recursive calls. */
1944 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1945     pa_sink_input *i;
1946     uint32_t idx;
1947 
1948     pa_sink_assert_ref(s);
1949     pa_assert(max_volume);
1950     pa_assert(channel_map);
1951     pa_assert(pa_sink_flat_volume_enabled(s));
1952 
1953     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1954         pa_cvolume remapped;
1955 
1956         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1957             if (PA_SINK_IS_LINKED(i->origin_sink->state))
1958                 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1959 
1960             /* Ignore this input. The origin sink uses volume sharing, so this
1961              * input's volume will be set to be equal to the root sink's real
1962              * volume. Obviously this input's current volume must not then
1963              * affect what the root sink's real volume will be. */
1964             continue;
1965         }
1966 
1967         remapped = i->volume;
1968         cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1969         pa_cvolume_merge(max_volume, max_volume, &remapped);
1970     }
1971 }
1972 
1973 /* Called from main thread. Only called for the root sink in volume sharing
1974  * cases, except for internal recursive calls. */
1975 static bool has_inputs(pa_sink *s) {
1976     pa_sink_input *i;
1977     uint32_t idx;
1978 
1979     pa_sink_assert_ref(s);
1980 
1981     PA_IDXSET_FOREACH(i, s->inputs, idx) {
1982         if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1983             return true;
1984     }
1985 
1986     return false;
1987 }
1988 
1989 /* Called from main thread. Only called for the root sink in volume sharing
1990  * cases, except for internal recursive calls. */
1991 static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1992     pa_sink_input *i;
1993     uint32_t idx;
1994 
1995     pa_sink_assert_ref(s);
1996     pa_assert(new_volume);
1997     pa_assert(channel_map);
1998 
1999     s->real_volume = *new_volume;
2000     pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
2001 
2002     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2003         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2004             if (pa_sink_flat_volume_enabled(s)) {
2005                 pa_cvolume new_input_volume;
2006 
2007                 /* Follow the root sink's real volume. */
2008                 new_input_volume = *new_volume;
2009                 pa_cvolume_remap(&new_input_volume, channel_map, &i->channel_map);
2010                 pa_sink_input_set_volume_direct(i, &new_input_volume);
2011                 compute_reference_ratio(i);
2012             }
2013 
2014             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2015                 update_real_volume(i->origin_sink, new_volume, channel_map);
2016         }
2017     }
2018 }
2019 
2020 /* Called from main thread. Only called for the root sink in shared volume
2021  * cases. */
2022 static void compute_real_volume(pa_sink *s) {
2023     pa_sink_assert_ref(s);
2024     pa_assert_ctl_context();
2025     pa_assert(PA_SINK_IS_LINKED(s->state));
2026     pa_assert(pa_sink_flat_volume_enabled(s));
2027     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2028 
2029     /* This determines the maximum volume of all streams and sets
2030      * s->real_volume accordingly. */
2031 
2032     if (!has_inputs(s)) {
2033         /* In the special case that we have no sink inputs we leave the
2034          * volume unmodified. */
2035         update_real_volume(s, &s->reference_volume, &s->channel_map);
2036         return;
2037     }
2038 
2039     pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
2040 
2041     /* First let's determine the new maximum volume of all inputs
2042      * connected to this sink */
2043     get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
2044     update_real_volume(s, &s->real_volume, &s->channel_map);
2045 
2046     /* Then, let's update the real ratios/soft volumes of all inputs
2047      * connected to this sink */
2048     compute_real_ratios(s);
2049 }
2050 
2051 /* Called from main thread. Only called for the root sink in shared volume
2052  * cases, except for internal recursive calls. */
2053 static void propagate_reference_volume(pa_sink *s) {
2054     pa_sink_input *i;
2055     uint32_t idx;
2056 
2057     pa_sink_assert_ref(s);
2058     pa_assert_ctl_context();
2059     pa_assert(PA_SINK_IS_LINKED(s->state));
2060     pa_assert(pa_sink_flat_volume_enabled(s));
2061 
2062     /* This is called whenever the sink volume changes in a way that is
2063      * not caused by a sink input volume change. We need to fix up the
2064      * sink input volumes accordingly */
2065 
2066     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2067         pa_cvolume new_volume;
2068 
2069         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2070             if (PA_SINK_IS_LINKED(i->origin_sink->state))
2071                 propagate_reference_volume(i->origin_sink);
2072 
2073             /* Since the origin sink uses volume sharing, this input's volume
2074              * needs to be updated to match the root sink's real volume, but
2075              * that will be done later in update_real_volume(). */
2076             continue;
2077         }
2078 
2079         /* This basically calculates:
2080          *
2081          * i->volume := s->reference_volume * i->reference_ratio  */
2082 
2083         new_volume = s->reference_volume;
2084         pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2085         pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2086         pa_sink_input_set_volume_direct(i, &new_volume);
2087     }
2088 }
2089 
2090 /* Called from main thread. Only called for the root sink in volume sharing
2091  * cases, except for internal recursive calls. The return value indicates
2092  * whether any reference volume actually changed. */
2093 static bool update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
2094     pa_cvolume volume;
2095     bool reference_volume_changed;
2096     pa_sink_input *i;
2097     uint32_t idx;
2098 
2099     pa_sink_assert_ref(s);
2100     pa_assert(PA_SINK_IS_LINKED(s->state));
2101     pa_assert(v);
2102     pa_assert(channel_map);
2103     pa_assert(pa_cvolume_valid(v));
2104 
2105     volume = *v;
2106     pa_cvolume_remap(&volume, channel_map, &s->channel_map);
2107 
2108     reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
2109     pa_sink_set_reference_volume_direct(s, &volume);
2110 
2111     s->save_volume = (!reference_volume_changed && s->save_volume) || save;
2112 
2113     if (!reference_volume_changed && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2114         /* If the root sink's volume doesn't change, then there can't be any
2115          * changes in the other sinks in the sink tree either.
2116          *
2117          * It's probably theoretically possible that even if the root sink's
2118          * volume changes slightly, some filter sink doesn't change its volume
2119          * due to rounding errors. If that happens, we still want to propagate
2120          * the changed root sink volume to the sinks connected to the
2121          * intermediate sink that didn't change its volume. This theoretical
2122          * possibility is the reason why we have that !(s->flags &
2123          * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
2124          * notice even if we always returned false here when
2125          * reference_volume_changed is false. */
2126         return false;
2127 
2128     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2129         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2130                 && PA_SINK_IS_LINKED(i->origin_sink->state))
2131             update_reference_volume(i->origin_sink, v, channel_map, false);
2132     }
2133 
2134     return true;
2135 }
2136 
2137 /* Called from main thread */
2138 void pa_sink_set_volume(
2139         pa_sink *s,
2140         const pa_cvolume *volume,
2141         bool send_msg,
2142         bool save) {
2143 
2144     pa_cvolume new_reference_volume;
2145     pa_sink *root_sink;
2146 
2147     pa_sink_assert_ref(s);
2148     pa_assert_ctl_context();
2149     pa_assert(PA_SINK_IS_LINKED(s->state));
2150     pa_assert(!volume || pa_cvolume_valid(volume));
2151     pa_assert(volume || pa_sink_flat_volume_enabled(s));
2152     pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
2153 
2154     /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
2155      * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
2156     if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
2157         pa_log_warn("Cannot change volume, sink is connected to a PASSTHROUGH input");
2158         return;
2159     }
2160 
2161     /* In case of volume sharing, the volume is set for the root sink first,
2162      * from which it's then propagated to the sharing sinks. */
2163     root_sink = pa_sink_get_master(s);
2164 
2165     if (PA_UNLIKELY(!root_sink))
2166         return;
2167 
2168     /* As a special exception we accept mono volumes on all sinks --
2169      * even on those with more complex channel maps */
2170 
2171     if (volume) {
2172         if (pa_cvolume_compatible(volume, &s->sample_spec))
2173             new_reference_volume = *volume;
2174         else {
2175             new_reference_volume = s->reference_volume;
2176             pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
2177         }
2178 
2179         pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2180 
2181         if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
2182             if (pa_sink_flat_volume_enabled(root_sink)) {
2183                 /* OK, propagate this volume change back to the inputs */
2184                 propagate_reference_volume(root_sink);
2185 
2186                 /* And now recalculate the real volume */
2187                 compute_real_volume(root_sink);
2188             } else
2189                 update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
2190         }
2191 
2192     } else {
2193         /* If volume is NULL we synchronize the sink's real and
2194          * reference volumes with the stream volumes. */
2195 
2196         pa_assert(pa_sink_flat_volume_enabled(root_sink));
2197 
2198         /* Ok, let's determine the new real volume */
2199         compute_real_volume(root_sink);
2200 
2201         /* Let's 'push' the reference volume if necessary */
2202         pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
2203         /* If the sink and its root don't have the same number of channels, we need to remap */
2204         if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
2205             pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
2206         update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
2207 
2208         /* Now that the reference volume is updated, we can update the streams'
2209          * reference ratios. */
2210         compute_reference_ratios(root_sink);
2211     }
2212 
2213     if (root_sink->set_volume) {
2214         /* If we have a function set_volume(), then we do not apply a
2215          * soft volume by default. However, set_volume() is free to
2216          * apply one to root_sink->soft_volume */
2217 
2218         pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
2219         if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
2220             root_sink->set_volume(root_sink);
2221 
2222     } else
2223         /* If we have no function set_volume(), then the soft volume
2224          * becomes the real volume */
2225         root_sink->soft_volume = root_sink->real_volume;
2226 
2227     /* This tells the sink that soft volume and/or real volume changed */
2228     if (send_msg)
2229         pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
2230 }
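
/* Illustrative sketch (not part of the original file): setting a sink to a
 * uniform volume from main-thread code such as a protocol or CLI handler.
 * The function name and the already-obtained, linked "sink" pointer are
 * hypothetical. */
#if 0
static void set_sink_to_half_volume(pa_sink *sink) {
    pa_cvolume v;

    /* Size the volume to the sink's channel count and set every channel to
     * 50% of PA_VOLUME_NORM; a mono volume would also be accepted thanks to
     * the special exception documented above. */
    pa_cvolume_set(&v, sink->sample_spec.channels, PA_VOLUME_NORM / 2);

    /* send_msg=true notifies the IO thread of the new soft/real volume,
     * save=true marks the volume as worth saving (e.g. by the device-restore
     * machinery). */
    pa_sink_set_volume(sink, &v, true, true);
}
#endif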
2231 
2232 /* Called from the IO thread if deferred volume is used, otherwise from the main thread.
2233  * Only to be called by sink implementor */
2234 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2235 
2236     pa_sink_assert_ref(s);
2237     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2238 
2239     if (s->flags & PA_SINK_DEFERRED_VOLUME)
2240         pa_sink_assert_io_context(s);
2241     else
2242         pa_assert_ctl_context();
2243 
2244     if (!volume)
2245         pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2246     else
2247         s->soft_volume = *volume;
2248 
2249     if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2250         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2251     else
2252         s->thread_info.soft_volume = s->soft_volume;
2253 }
2254 
2255 /* Called from the main thread. Only called for the root sink in volume sharing
2256  * cases, except for internal recursive calls. */
2257 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
2258     pa_sink_input *i;
2259     uint32_t idx;
2260 
2261     pa_sink_assert_ref(s);
2262     pa_assert(old_real_volume);
2263     pa_assert_ctl_context();
2264     pa_assert(PA_SINK_IS_LINKED(s->state));
2265 
2266     /* This is called when the hardware's real volume changes due to
2267      * some external event. We copy the real volume into our
2268      * reference volume and then rebuild the stream volumes based on
2269      * i->real_ratio which should stay fixed. */
2270 
2271     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
2272         if (pa_cvolume_equal(old_real_volume, &s->real_volume))
2273             return;
2274 
2275         /* 1. Make the real volume the reference volume */
2276         update_reference_volume(s, &s->real_volume, &s->channel_map, true);
2277     }
2278 
2279     if (pa_sink_flat_volume_enabled(s)) {
2280 
2281         PA_IDXSET_FOREACH(i, s->inputs, idx) {
2282             pa_cvolume new_volume;
2283 
2284             /* 2. Since the sink's reference and real volumes are equal
2285              * now our ratios should be too. */
2286             pa_sink_input_set_reference_ratio(i, &i->real_ratio);
2287 
2288             /* 3. Recalculate the new stream reference volume based on the
2289              * reference ratio and the sink's reference volume.
2290              *
2291              * This basically calculates:
2292              *
2293              * i->volume = s->reference_volume * i->reference_ratio
2294              *
2295              * This is identical to propagate_reference_volume() */
2296             new_volume = s->reference_volume;
2297             pa_cvolume_remap(&new_volume, &s->channel_map, &i->channel_map);
2298             pa_sw_cvolume_multiply(&new_volume, &new_volume, &i->reference_ratio);
2299             pa_sink_input_set_volume_direct(i, &new_volume);
2300 
2301             if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
2302                     && PA_SINK_IS_LINKED(i->origin_sink->state))
2303                 propagate_real_volume(i->origin_sink, old_real_volume);
2304         }
2305     }
2306 
2307     /* Something got changed in the hardware. It probably makes sense
2308      * to save changed hw settings given that hw volume changes not
2309      * triggered by PA are almost certainly done by the user. */
2310     if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2311         s->save_volume = true;
2312 }
2313 
2314 /* Called from io thread */
2315 void pa_sink_update_volume_and_mute(pa_sink *s) {
2316     pa_assert(s);
2317     pa_sink_assert_io_context(s);
2318 
2319     pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
2320 }
2321 
2322 /* Called from main thread */
2323 const pa_cvolume *pa_sink_get_volume(pa_sink *s, bool force_refresh) {
2324     pa_sink_assert_ref(s);
2325     pa_assert_ctl_context();
2326     pa_assert(PA_SINK_IS_LINKED(s->state));
2327 
2328     if (s->refresh_volume || force_refresh) {
2329         pa_cvolume old_real_volume;
2330 
2331         pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2332 
2333         old_real_volume = s->real_volume;
2334 
2335         if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
2336             s->get_volume(s);
2337 
2338         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
2339 
2340         update_real_volume(s, &s->real_volume, &s->channel_map);
2341         propagate_real_volume(s, &old_real_volume);
2342     }
2343 
2344     return &s->reference_volume;
2345 }
2346 
2347 /* Called from main thread. In volume sharing cases, only the root sink may
2348  * call this. */
2349 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
2350     pa_cvolume old_real_volume;
2351 
2352     pa_sink_assert_ref(s);
2353     pa_assert_ctl_context();
2354     pa_assert(PA_SINK_IS_LINKED(s->state));
2355     pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2356 
2357     /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2358 
2359     old_real_volume = s->real_volume;
2360     update_real_volume(s, new_real_volume, &s->channel_map);
2361     propagate_real_volume(s, &old_real_volume);
2362 }
2363 
2364 /* Called from main thread */
2365 void pa_sink_set_mute(pa_sink *s, bool mute, bool save) {
2366     bool old_muted;
2367 
2368     pa_sink_assert_ref(s);
2369     pa_assert_ctl_context();
2370 
2371     old_muted = s->muted;
2372 
2373     if (mute == old_muted) {
2374         s->save_muted |= save;
2375         return;
2376     }
2377 
2378     s->muted = mute;
2379     s->save_muted = save;
2380 
2381     if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute) {
2382         s->set_mute_in_progress = true;
2383         s->set_mute(s);
2384         s->set_mute_in_progress = false;
2385     }
2386 
2387     if (!PA_SINK_IS_LINKED(s->state))
2388         return;
2389 
2390     pa_log_debug("The mute of sink %s changed from %s to %s.", s->name, pa_yes_no(old_muted), pa_yes_no(mute));
2391     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2392     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2393     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_MUTE_CHANGED], s);
2394 }
2395 
2396 /* Called from main thread */
2397 bool pa_sink_get_mute(pa_sink *s, bool force_refresh) {
2398 
2399     pa_sink_assert_ref(s);
2400     pa_assert_ctl_context();
2401     pa_assert(PA_SINK_IS_LINKED(s->state));
2402 
2403     if ((s->refresh_muted || force_refresh) && s->get_mute) {
2404         bool mute;
2405 
2406         if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2407             if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, &mute, 0, NULL) >= 0)
2408                 pa_sink_mute_changed(s, mute);
2409         } else {
2410             if (s->get_mute(s, &mute) >= 0)
2411                 pa_sink_mute_changed(s, mute);
2412         }
2413     }
2414 
2415     return s->muted;
2416 }
2417 
2418 /* Called from main thread */
2419 void pa_sink_mute_changed(pa_sink *s, bool new_muted) {
2420     pa_sink_assert_ref(s);
2421     pa_assert_ctl_context();
2422     pa_assert(PA_SINK_IS_LINKED(s->state));
2423 
2424     if (s->set_mute_in_progress)
2425         return;
2426 
2427     /* pa_sink_set_mute() does this same check, so this may appear redundant,
2428      * but we must have this here also, because the save parameter of
2429      * pa_sink_set_mute() would otherwise have unintended side effects (saving
2430      * the mute state when it shouldn't be saved). */
2431     if (new_muted == s->muted)
2432         return;
2433 
2434     pa_sink_set_mute(s, new_muted, true);
2435 }
2436 
2437 /* Called from main thread */
2438 bool pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2439     pa_sink_assert_ref(s);
2440     pa_assert_ctl_context();
2441 
2442     if (p)
2443         pa_proplist_update(s->proplist, mode, p);
2444 
2445     if (PA_SINK_IS_LINKED(s->state)) {
2446         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2447         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2448     }
2449 
2450     return true;
2451 }
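
/* Illustrative sketch (not part of the original file): adding a property to a
 * sink's proplist from the main thread. The function name, property key and
 * value are made up for the example. */
#if 0
static void tag_sink_with_note(pa_sink *sink) {
    pa_proplist *p = pa_proplist_new();

    pa_proplist_sets(p, "example.note", "added by a module");

    /* PA_UPDATE_MERGE only adds keys that are not present yet; use
     * PA_UPDATE_REPLACE to overwrite existing entries as well. */
    pa_sink_update_proplist(sink, PA_UPDATE_MERGE, p);

    pa_proplist_free(p);
}
#endif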
2452 
2453 /* Called from main thread */
2454 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2455 void pa_sink_set_description(pa_sink *s, const char *description) {
2456     const char *old;
2457     pa_sink_assert_ref(s);
2458     pa_assert_ctl_context();
2459 
2460     if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2461         return;
2462 
2463     old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2464 
2465     if (old && description && pa_streq(old, description))
2466         return;
2467 
2468     if (description)
2469         pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2470     else
2471         pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2472 
2473     if (s->monitor_source) {
2474         char *n;
2475 
2476         n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2477         pa_source_set_description(s->monitor_source, n);
2478         pa_xfree(n);
2479     }
2480 
2481     if (PA_SINK_IS_LINKED(s->state)) {
2482         pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2483         pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2484     }
2485 }
2486 
2487 /* Called from main thread */
2488 unsigned pa_sink_linked_by(pa_sink *s) {
2489     unsigned ret;
2490 
2491     pa_sink_assert_ref(s);
2492     pa_assert_ctl_context();
2493     pa_assert(PA_SINK_IS_LINKED(s->state));
2494 
2495     ret = pa_idxset_size(s->inputs);
2496 
2497     /* We add in the number of streams connected to us here. Please
2498      * note the asymmetry to pa_sink_used_by()! */
2499 
2500     if (s->monitor_source)
2501         ret += pa_source_linked_by(s->monitor_source);
2502 
2503     return ret;
2504 }
2505 
2506 /* Called from main thread */
2507 unsigned pa_sink_used_by(pa_sink *s) {
2508     unsigned ret;
2509 
2510     pa_sink_assert_ref(s);
2511     pa_assert_ctl_context();
2512     pa_assert(PA_SINK_IS_LINKED(s->state));
2513 
2514     ret = pa_idxset_size(s->inputs);
2515     pa_assert(ret >= s->n_corked);
2516 
2517     /* Streams connected to our monitor source do not matter for
2518      * pa_sink_used_by()! */
2519 
2520     return ret - s->n_corked;
2521 }
2522 
2523 /* Called from main thread */
2524 unsigned pa_sink_check_suspend(pa_sink *s, pa_sink_input *ignore_input, pa_source_output *ignore_output) {
2525     unsigned ret;
2526     pa_sink_input *i;
2527     uint32_t idx;
2528 
2529     pa_sink_assert_ref(s);
2530     pa_assert_ctl_context();
2531 
2532     if (!PA_SINK_IS_LINKED(s->state))
2533         return 0;
2534 
2535     ret = 0;
2536 
2537     PA_IDXSET_FOREACH(i, s->inputs, idx) {
2538         if (i == ignore_input)
2539             continue;
2540 
2541         /* We do not assert here. It is perfectly valid for a sink input to
2542          * be in the INIT state (i.e. created, marked done but not yet put)
2543          * and we should not care if it's unlinked as it won't contribute
2544          * towards our busy status.
2545          */
2546         if (!PA_SINK_INPUT_IS_LINKED(i->state))
2547             continue;
2548 
2549         if (i->state == PA_SINK_INPUT_CORKED)
2550             continue;
2551 
2552         if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2553             continue;
2554 
2555         ret ++;
2556     }
2557 
2558     if (s->monitor_source)
2559         ret += pa_source_check_suspend(s->monitor_source, ignore_output);
2560 
2561     return ret;
2562 }
2563 
2564 const char *pa_sink_state_to_string(pa_sink_state_t state) {
2565     switch (state) {
2566         case PA_SINK_INIT:          return "INIT";
2567         case PA_SINK_IDLE:          return "IDLE";
2568         case PA_SINK_RUNNING:       return "RUNNING";
2569         case PA_SINK_SUSPENDED:     return "SUSPENDED";
2570         case PA_SINK_UNLINKED:      return "UNLINKED";
2571         case PA_SINK_INVALID_STATE: return "INVALID_STATE";
2572     }
2573 
2574     pa_assert_not_reached();
2575 }
2576 
2577 /* Called from the IO thread */
2578 static void sync_input_volumes_within_thread(pa_sink *s) {
2579     pa_sink_input *i;
2580     void *state = NULL;
2581 
2582     pa_sink_assert_ref(s);
2583     pa_sink_assert_io_context(s);
2584 
2585     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2586         if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2587             continue;
2588 
2589         i->thread_info.soft_volume = i->soft_volume;
2590         pa_sink_input_request_rewind(i, 0, true, false, false);
2591     }
2592 }
2593 
2594 /* Called from the IO thread. Only called for the root sink in volume sharing
2595  * cases, except for internal recursive calls. */
2596 static void set_shared_volume_within_thread(pa_sink *s) {
2597     pa_sink_input *i = NULL;
2598     void *state = NULL;
2599 
2600     pa_sink_assert_ref(s);
2601 
2602     PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2603 
2604     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2605         if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2606             set_shared_volume_within_thread(i->origin_sink);
2607     }
2608 }
2609 
2610 /* Called from IO thread. Gets max_rewind limit from sink inputs.
2611  * This function is used to communicate the max_rewind value of a
2612  * virtual sink to the master sink. The get_max_rewind_limit()
2613  * callback is implemented by sink inputs connecting a virtual
2614  * sink to its master. */
2615 static size_t get_max_rewind_limit(pa_sink *s, size_t requested_limit) {
2616     pa_sink_input *i;
2617     void *state = NULL;
2618     size_t rewind_limit;
2619 
2620     pa_assert(s);
2621 
2622     /* Get rewind limit in sink sample spec from sink inputs */
2623     rewind_limit = (size_t)(-1);
2624     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2625         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2626 
2627             if (i->get_max_rewind_limit) {
2628                 size_t limit;
2629 
2630                 limit = i->get_max_rewind_limit(i);
2631                 if (rewind_limit == (size_t)(-1) || rewind_limit > limit)
2632                     rewind_limit = limit;
2633             }
2634         }
2635     }
2636 
2637     /* Set max_rewind */
2638     if (rewind_limit != (size_t)(-1))
2639         requested_limit = PA_MIN(rewind_limit, requested_limit);
2640 
2641     return requested_limit;
2642 }
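
/* Worked example (illustrative, not from the original file): if two virtual
 * sinks attached to this sink report rewind limits of 64 kB and 16 kB through
 * their sink inputs' get_max_rewind_limit() callbacks, and the caller
 * requested a limit of 1 MB, the smallest value wins and the effective
 * max_rewind limit becomes 16 kB. */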
2643 
2644 /* Called from IO thread, except when it is not */
2645 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2646     pa_sink *s = PA_SINK(o);
2647     pa_sink_assert_ref(s);
2648 
2649     switch ((pa_sink_message_t) code) {
2650 
2651         case PA_SINK_MESSAGE_ADD_INPUT: {
2652             pa_sink_input *i = PA_SINK_INPUT(userdata);
2653 
2654             /* If you change anything here, make sure to change the
2655              * sink input handling a few lines down at
2656              * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2657 
2658             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2659 
2660             /* Since the caller sleeps in pa_sink_input_put(), we can
2661              * safely access data outside of thread_info even though
2662              * it is mutable */
2663 
2664             if ((i->thread_info.sync_prev = i->sync_prev)) {
2665                 pa_assert(i->sink == i->thread_info.sync_prev->sink);
2666                 pa_assert(i->sync_prev->sync_next == i);
2667                 i->thread_info.sync_prev->thread_info.sync_next = i;
2668             }
2669 
2670             if ((i->thread_info.sync_next = i->sync_next)) {
2671                 pa_assert(i->sink == i->thread_info.sync_next->sink);
2672                 pa_assert(i->sync_next->sync_prev == i);
2673                 i->thread_info.sync_next->thread_info.sync_prev = i;
2674             }
2675 
2676             pa_sink_input_attach(i);
2677 
2678             pa_sink_input_set_state_within_thread(i, i->state);
2679 
2680             /* The requested latency of the sink input needs to be fixed up and
2681              * then configured on the sink. If this causes the sink latency to
2682              * go down, the sink implementor is responsible for doing a rewind
2683              * in the update_requested_latency() callback to ensure that the
2684              * sink buffer doesn't contain more data than what the new latency
2685              * allows.
2686              *
2687              * XXX: Does it really make sense to push this responsibility to
2688              * the sink implementors? Wouldn't it be better to do it once in
2689              * the core than many times in the modules? */
2690 
2691             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2692                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2693 
2694             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2695             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2696 
2697             /* We don't rewind here automatically. This is left to the
2698              * sink input implementor because some sink inputs need a
2699              * slow start, i.e. need some time to buffer client
2700              * samples before beginning streaming.
2701              *
2702              * XXX: Does it really make sense to push this functionality to
2703              * the sink implementors? Wouldn't it be better to do it once in
2704              * the core than many times in the modules? */
2705 
2706             /* In flat volume mode we need to update the volume as
2707              * well */
2708             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2709         }
2710 
2711         case PA_SINK_MESSAGE_REMOVE_INPUT: {
2712             pa_sink_input *i = PA_SINK_INPUT(userdata);
2713 
2714             /* If you change anything here, make sure to change the
2715              * sink input handling a few lines down at
2716              * PA_SINK_MESSAGE_START_MOVE, too. */
2717 
2718             pa_sink_input_detach(i);
2719 
2720             pa_sink_input_set_state_within_thread(i, i->state);
2721 
2722             /* Since the caller sleeps in pa_sink_input_unlink(),
2723              * we can safely access data outside of thread_info even
2724              * though it is mutable */
2725 
2726             pa_assert(!i->sync_prev);
2727             pa_assert(!i->sync_next);
2728 
2729             if (i->thread_info.sync_prev) {
2730                 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
2731                 i->thread_info.sync_prev = NULL;
2732             }
2733 
2734             if (i->thread_info.sync_next) {
2735                 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
2736                 i->thread_info.sync_next = NULL;
2737             }
2738 
2739             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2740             pa_sink_request_rewind(s, (size_t) -1);
2741             pa_sink_invalidate_requested_latency(s, true);
2742 
2743             /* In flat volume mode we need to update the volume as
2744              * well */
2745             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2746         }
2747 
2748         case PA_SINK_MESSAGE_START_MOVE: {
2749             pa_sink_input *i = PA_SINK_INPUT(userdata);
2750 
2751             /* We don't support moving synchronized streams. */
2752             pa_assert(!i->sync_prev);
2753             pa_assert(!i->sync_next);
2754             pa_assert(!i->thread_info.sync_next);
2755             pa_assert(!i->thread_info.sync_prev);
2756 
2757             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2758 
2759                 /* The old sink probably has some audio from this
2760                  * stream in its buffer. We want to "take it back" as
2761                  * much as possible and play it to the new sink. We
2762                  * don't know at this point how much the old sink can
2763                  * rewind, so we just save some values and reconstruct
2764                  * the render memblockq in finish_move(). */
2765 
2766                 /* Save some current values for restore_render_memblockq() */
2767                 i->thread_info.origin_sink_latency = pa_sink_get_latency_within_thread(s, false);
2768                 i->thread_info.move_start_time = pa_rtclock_now();
2769                 i->thread_info.resampler_delay_frames = 0;
2770                 if (i->thread_info.resampler)
2771                     /* Round down */
2772                     i->thread_info.resampler_delay_frames = pa_resampler_get_delay(i->thread_info.resampler, false);
2773             }
2774 
2775             pa_sink_input_detach(i);
2776 
2777             /* Let's remove the sink input ...*/
2778             pa_hashmap_remove_and_free(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index));
2779 
2780             /* The rewind must be requested before invalidating the latency, otherwise
2781              * the max_rewind value of the sink may change before the rewind. */
2782             pa_log_debug("Requesting rewind due to started move");
2783             pa_sink_request_rewind(s, (size_t) -1);
2784 
2785             pa_sink_invalidate_requested_latency(s, true);
2786 
2787             /* In flat volume mode we need to update the volume as
2788              * well */
2789             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2790         }
2791 
2792         case PA_SINK_MESSAGE_FINISH_MOVE: {
2793             pa_sink_input *i = PA_SINK_INPUT(userdata);
2794 
2795             /* We don't support moving synchronized streams. */
2796             pa_assert(!i->sync_prev);
2797             pa_assert(!i->sync_next);
2798             pa_assert(!i->thread_info.sync_next);
2799             pa_assert(!i->thread_info.sync_prev);
2800 
2801             pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
2802 
2803             pa_sink_input_attach(i);
2804 
2805             if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
2806                 pa_usec_t usec = 0;
2807                 size_t nbytes, delay_bytes;
2808 
2809                 /* In the ideal case the new sink would start playing
2810                  * the stream immediately. That requires the sink to
2811                  * be able to rewind all of its latency, which usually
2812                  * isn't possible, so there will probably be some gap
2813                  * before the moved stream becomes audible. We then
2814                  * have two possibilities: 1) start playing the stream
2815                  * from where it is now, or 2) drop the unrewindable
2816                  * latency of the sink from the stream. With option 1
2817                  * we won't lose any audio but the stream will have a
2818                  * pause. With option 2 we may lose some audio but the
2819                  * stream time will be somewhat in sync with the wall
2820                  * clock. Lennart seems to have chosen option 2 (one
2821                  * of the reasons might have been that option 1 is
2822                  * actually much harder to implement), so we drop the
2823                  * latency of the new sink from the moved stream and
2824                  * hope that the sink will undo most of that in the
2825                  * rewind. */
2826 
2827                 /* Get the latency of the sink */
2828                 usec = pa_sink_get_latency_within_thread(s, false);
2829                 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
2830 
2831                 /* Calculate number of samples that have been played during the move */
2832                 delay_bytes = 0;
2833                 if (i->thread_info.move_start_time > 0) {
2834                     usec = pa_rtclock_now() - i->thread_info.move_start_time;
2835                     delay_bytes = pa_usec_to_bytes(usec, &s->sample_spec);
2836                 }
2837 
2838                 /* max_rewind must be updated for the sink input because otherwise
2839                  * the data in the render memblockq will get lost */
2840                 pa_sink_input_update_max_rewind(i, nbytes);
2841 
2842                 if (nbytes + delay_bytes > 0)
2843                     pa_sink_input_drop(i, nbytes + delay_bytes);
2844 
2845                 pa_log_debug("Requesting rewind due to finished move");
2846                 pa_sink_request_rewind(s, nbytes);
2847             }
2848 
2849             /* Updating the requested sink latency has to be done
2850              * after the sink rewind request, not before, because
2851              * otherwise the sink may limit the rewind amount
2852              * needlessly. */
2853 
2854             if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
2855                 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
2856 
2857             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2858             pa_sink_input_update_max_request(i, s->thread_info.max_request);
2859 
2860             /* Reset move variables */
2861             i->thread_info.move_start_time = 0;
2862             i->thread_info.resampler_delay_frames = 0;
2863             i->thread_info.origin_sink_latency = 0;
2864 
2865             return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2866         }
2867 
2868         case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
2869             pa_sink *root_sink = pa_sink_get_master(s);
2870 
2871             if (PA_LIKELY(root_sink))
2872                 set_shared_volume_within_thread(root_sink);
2873 
2874             return 0;
2875         }
2876 
2877         case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
2878 
2879             if (s->flags & PA_SINK_DEFERRED_VOLUME) {
2880                 s->set_volume(s);
2881                 pa_sink_volume_change_push(s);
2882             }
2883             /* Fall through ... */
2884 
2885         case PA_SINK_MESSAGE_SET_VOLUME:
2886 
2887             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2888                 s->thread_info.soft_volume = s->soft_volume;
2889                 pa_sink_request_rewind(s, (size_t) -1);
2890             }
2891 
2892             /* Fall through ... */
2893 
2894         case PA_SINK_MESSAGE_SYNC_VOLUMES:
2895             sync_input_volumes_within_thread(s);
2896             return 0;
2897 
2898         case PA_SINK_MESSAGE_GET_VOLUME:
2899 
2900             if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
2901                 s->get_volume(s);
2902                 pa_sink_volume_change_flush(s);
2903                 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2904             }
2905 
2906             /* In case the sink implementor reset the SW volume. */
2907             if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2908                 s->thread_info.soft_volume = s->soft_volume;
2909                 pa_sink_request_rewind(s, (size_t) -1);
2910             }
2911 
2912             return 0;
2913 
2914         case PA_SINK_MESSAGE_SET_MUTE:
2915 
2916             if (s->thread_info.soft_muted != s->muted) {
2917                 s->thread_info.soft_muted = s->muted;
2918                 pa_sink_request_rewind(s, (size_t) -1);
2919             }
2920 
2921             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
2922                 s->set_mute(s);
2923 
2924             return 0;
2925 
2926         case PA_SINK_MESSAGE_GET_MUTE:
2927 
2928             if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
2929                 return s->get_mute(s, userdata);
2930 
2931             return 0;
2932 
2933         case PA_SINK_MESSAGE_SET_STATE: {
2934             struct set_state_data *data = userdata;
2935             bool suspend_change =
2936                 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(data->state)) ||
2937                 (PA_SINK_IS_OPENED(s->thread_info.state) && data->state == PA_SINK_SUSPENDED);
2938 
2939             if (s->set_state_in_io_thread) {
2940                 int r;
2941 
2942                 if ((r = s->set_state_in_io_thread(s, data->state, data->suspend_cause)) < 0)
2943                     return r;
2944             }
2945 
2946             s->thread_info.state = data->state;
2947 
2948             if (s->thread_info.state == PA_SINK_SUSPENDED) {
2949                 s->thread_info.rewind_nbytes = 0;
2950                 s->thread_info.rewind_requested = false;
2951             }
2952 
2953             if (suspend_change) {
2954                 pa_sink_input *i;
2955                 void *state = NULL;
2956 
2957                 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2958                     if (i->suspend_within_thread)
2959                         i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2960             }
2961 
2962             return 0;
2963         }
2964 
2965         case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2966 
2967             pa_usec_t *usec = userdata;
2968             *usec = pa_sink_get_requested_latency_within_thread(s);
2969 
2970             /* Yes, that's right: the IO thread will see -1 when no
2971              * explicit requested latency is configured, while the
2972              * main thread will see max_latency */
2973             if (*usec == (pa_usec_t) -1)
2974                 *usec = s->thread_info.max_latency;
2975 
2976             return 0;
2977         }
2978 
2979         case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2980             pa_usec_t *r = userdata;
2981 
2982             pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2983 
2984             return 0;
2985         }
2986 
2987         case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2988             pa_usec_t *r = userdata;
2989 
2990             r[0] = s->thread_info.min_latency;
2991             r[1] = s->thread_info.max_latency;
2992 
2993             return 0;
2994         }
2995 
2996         case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2997 
2998             *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2999             return 0;
3000 
3001         case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
3002 
3003             pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
3004             return 0;
3005 
3006         case PA_SINK_MESSAGE_GET_MAX_REWIND:
3007 
3008             *((size_t*) userdata) = s->thread_info.max_rewind;
3009             return 0;
3010 
3011         case PA_SINK_MESSAGE_GET_LAST_REWIND:
3012 
3013             *((size_t*) userdata) = s->thread_info.last_rewind_nbytes;
3014             return 0;
3015 
3016         case PA_SINK_MESSAGE_GET_MAX_REQUEST:
3017 
3018             *((size_t*) userdata) = s->thread_info.max_request;
3019             return 0;
3020 
3021         case PA_SINK_MESSAGE_SET_MAX_REWIND:
3022 
3023             pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
3024             return 0;
3025 
3026         case PA_SINK_MESSAGE_SET_MAX_REQUEST:
3027 
3028             pa_sink_set_max_request_within_thread(s, (size_t) offset);
3029             return 0;
3030 
3031         case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
3032             /* This message is sent from the IO thread and handled in the main thread. */
3033             pa_assert_ctl_context();
3034 
3035             /* Make sure we're not messing with main thread when no longer linked */
3036             if (!PA_SINK_IS_LINKED(s->state))
3037                 return 0;
3038 
3039             pa_sink_get_volume(s, true);
3040             pa_sink_get_mute(s, true);
3041             return 0;
3042 
3043         case PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET:
3044             s->thread_info.port_latency_offset = offset;
3045             return 0;
3046 
3047         case PA_SINK_MESSAGE_GET_LATENCY:
3048         case PA_SINK_MESSAGE_MAX:
3049             ;
3050     }
3051 
3052     return -1;
3053 }
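
/* Illustrative sketch (not a new API, just the existing pattern): most of the
 * cases above run in the sink's IO thread when a main-thread wrapper posts a
 * message to s->asyncmsgq, following the same pattern as the getters further
 * down in this file, e.g.:
 *
 *     size_t max_rewind;
 *     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s),
 *                                    PA_SINK_MESSAGE_GET_MAX_REWIND,
 *                                    &max_rewind, 0, NULL) == 0);
 */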
3054 
3055 /* Called from main thread */
3056 int pa_sink_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
3057     pa_sink *sink;
3058     uint32_t idx;
3059     int ret = 0;
3060 
3061     pa_core_assert_ref(c);
3062     pa_assert_ctl_context();
3063     pa_assert(cause != 0);
3064 
3065     PA_IDXSET_FOREACH(sink, c->sinks, idx) {
3066         int r;
3067 
3068         if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
3069             ret = r;
3070     }
3071 
3072     return ret;
3073 }
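
/* Illustrative sketch (hypothetical caller code): a policy module could
 * suspend every sink from the main thread and later resume them by clearing
 * the same cause; PA_SUSPEND_USER is assumed to be the suspend cause defined
 * in pulsecore/core.h:
 *
 *     if (pa_sink_suspend_all(core, true, PA_SUSPEND_USER) < 0)
 *         pa_log_warn("Some sinks could not be suspended.");
 *
 *     // ... and later:
 *     pa_sink_suspend_all(core, false, PA_SUSPEND_USER);
 */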
3074 
3075 /* Called from IO thread */
3076 void pa_sink_detach_within_thread(pa_sink *s) {
3077     pa_sink_input *i;
3078     void *state = NULL;
3079 
3080     pa_sink_assert_ref(s);
3081     pa_sink_assert_io_context(s);
3082     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3083 
3084     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3085         pa_sink_input_detach(i);
3086 
3087     if (s->monitor_source)
3088         pa_source_detach_within_thread(s->monitor_source);
3089 }
3090 
3091 /* Called from IO thread */
3092 void pa_sink_attach_within_thread(pa_sink *s) {
3093     pa_sink_input *i;
3094     void *state = NULL;
3095 
3096     pa_sink_assert_ref(s);
3097     pa_sink_assert_io_context(s);
3098     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3099 
3100     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3101         pa_sink_input_attach(i);
3102 
3103     if (s->monitor_source)
3104         pa_source_attach_within_thread(s->monitor_source);
3105 }
3106 
3107 /* Called from IO thread */
3108 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
3109     pa_sink_assert_ref(s);
3110     pa_sink_assert_io_context(s);
3111     pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
3112 
3113     if (nbytes == (size_t) -1)
3114         nbytes = s->thread_info.max_rewind;
3115 
3116     nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
3117 
3118     if (s->thread_info.rewind_requested &&
3119         nbytes <= s->thread_info.rewind_nbytes)
3120         return;
3121 
3122     s->thread_info.rewind_nbytes = nbytes;
3123     s->thread_info.rewind_requested = true;
3124 
3125     if (s->request_rewind)
3126         s->request_rewind(s);
3127 }
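
/* Illustrative sketch: passing (size_t) -1 means "rewind as much as possible";
 * it is translated to max_rewind above, and any explicit request is clamped to
 * the same limit:
 *
 *     pa_sink_request_rewind(s, (size_t) -1);   // full rewind, up to max_rewind
 *     pa_sink_request_rewind(s, nbytes);        // ask for nbytes, capped at max_rewind
 */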
3128 
3129 /* Called from IO thread */
3130 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
3131     pa_usec_t result = (pa_usec_t) -1;
3132     pa_sink_input *i;
3133     void *state = NULL;
3134     pa_usec_t monitor_latency;
3135 
3136     pa_sink_assert_ref(s);
3137     pa_sink_assert_io_context(s);
3138 
3139     if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
3140         return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
3141 
3142     if (s->thread_info.requested_latency_valid)
3143         return s->thread_info.requested_latency;
3144 
3145     PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3146         if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
3147             (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
3148             result = i->thread_info.requested_sink_latency;
3149 
3150     monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
3151 
3152     if (monitor_latency != (pa_usec_t) -1 &&
3153         (result == (pa_usec_t) -1 || result > monitor_latency))
3154         result = monitor_latency;
3155 
3156     if (result != (pa_usec_t) -1)
3157         result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
3158 
3159     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3160         /* Only cache if properly initialized */
3161         s->thread_info.requested_latency = result;
3162         s->thread_info.requested_latency_valid = true;
3163     }
3164 
3165     return result;
3166 }
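
/* Worked example (hypothetical numbers): with two inputs requesting 40 ms and
 * 25 ms and the monitor source requesting 30 ms, the function picks 25 ms and
 * then clamps it to [min_latency, max_latency]. If nothing requests a latency
 * at all it returns (pa_usec_t) -1, which the GET_REQUESTED_LATENCY message
 * handler above maps to max_latency for the main thread. */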
3167 
3168 /* Called from main thread */
3169 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
3170     pa_usec_t usec = 0;
3171 
3172     pa_sink_assert_ref(s);
3173     pa_assert_ctl_context();
3174     pa_assert(PA_SINK_IS_LINKED(s->state));
3175 
3176     if (s->state == PA_SINK_SUSPENDED)
3177         return 0;
3178 
3179     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
3180 
3181     return usec;
3182 }
3183 
3184 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3185 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3186     pa_sink_input *i;
3187     void *state = NULL;
3188 
3189     pa_sink_assert_ref(s);
3190     pa_sink_assert_io_context(s);
3191 
3192     max_rewind = get_max_rewind_limit(s, max_rewind);
3193 
3194     if (max_rewind == s->thread_info.max_rewind)
3195         return;
3196 
3197     s->thread_info.max_rewind = max_rewind;
3198 
3199     if (PA_SINK_IS_LINKED(s->thread_info.state))
3200         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3201             pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3202 
3203     if (s->monitor_source)
3204         pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3205 }
3206 
3207 /* Called from main thread */
3208 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3209     pa_sink_assert_ref(s);
3210     pa_assert_ctl_context();
3211 
3212     if (PA_SINK_IS_LINKED(s->state))
3213         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3214     else
3215         pa_sink_set_max_rewind_within_thread(s, max_rewind);
3216 }
3217 
3218 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3219 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3220     void *state = NULL;
3221 
3222     pa_sink_assert_ref(s);
3223     pa_sink_assert_io_context(s);
3224 
3225     if (max_request == s->thread_info.max_request)
3226         return;
3227 
3228     s->thread_info.max_request = max_request;
3229 
3230     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3231         pa_sink_input *i;
3232 
3233         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3234             pa_sink_input_update_max_request(i, s->thread_info.max_request);
3235     }
3236 }
3237 
3238 /* Called from main thread */
3239 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3240     pa_sink_assert_ref(s);
3241     pa_assert_ctl_context();
3242 
3243     if (PA_SINK_IS_LINKED(s->state))
3244         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3245     else
3246         pa_sink_set_max_request_within_thread(s, max_request);
3247 }
3248 
3249 /* Called from IO thread */
3250 void pa_sink_invalidate_requested_latency(pa_sink *s, bool dynamic) {
3251     pa_sink_input *i;
3252     void *state = NULL;
3253 
3254     pa_sink_assert_ref(s);
3255     pa_sink_assert_io_context(s);
3256 
3257     if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
3258         s->thread_info.requested_latency_valid = false;
3259     else if (dynamic)
3260         return;
3261 
3262     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3263 
3264         if (s->update_requested_latency)
3265             s->update_requested_latency(s);
3266 
3267         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3268             if (i->update_sink_requested_latency)
3269                 i->update_sink_requested_latency(i);
3270     }
3271 }
3272 
3273 /* Called from main thread */
3274 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3275     pa_sink_assert_ref(s);
3276     pa_assert_ctl_context();
3277 
3278     /* min_latency == 0:           no limit
3279      * min_latency anything else:  specified limit
3280      *
3281      * Similar for max_latency */
3282 
3283     if (min_latency < ABSOLUTE_MIN_LATENCY)
3284         min_latency = ABSOLUTE_MIN_LATENCY;
3285 
3286     if (max_latency <= 0 ||
3287         max_latency > ABSOLUTE_MAX_LATENCY)
3288         max_latency = ABSOLUTE_MAX_LATENCY;
3289 
3290     pa_assert(min_latency <= max_latency);
3291 
3292     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3293     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3294                max_latency == ABSOLUTE_MAX_LATENCY) ||
3295               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3296 
3297     if (PA_SINK_IS_LINKED(s->state)) {
3298         pa_usec_t r[2];
3299 
3300         r[0] = min_latency;
3301         r[1] = max_latency;
3302 
3303         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3304     } else
3305         pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3306 }
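
/* Illustrative sketch (hypothetical driver code): a sink implementation that
 * advertises PA_SINK_DYNAMIC_LATENCY would typically declare its supported
 * range once during initialization, before the sink is linked:
 *
 *     pa_sink_set_latency_range(s, 5 * PA_USEC_PER_MSEC, 2 * PA_USEC_PER_SEC);
 */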
3307 
3308 /* Called from main thread */
3309 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3310     pa_sink_assert_ref(s);
3311     pa_assert_ctl_context();
3312     pa_assert(min_latency);
3313     pa_assert(max_latency);
3314 
3315     if (PA_SINK_IS_LINKED(s->state)) {
3316         pa_usec_t r[2] = { 0, 0 };
3317 
3318         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3319 
3320         *min_latency = r[0];
3321         *max_latency = r[1];
3322     } else {
3323         *min_latency = s->thread_info.min_latency;
3324         *max_latency = s->thread_info.max_latency;
3325     }
3326 }
3327 
3328 /* Called from IO thread */
3329 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3330     pa_sink_assert_ref(s);
3331     pa_sink_assert_io_context(s);
3332 
3333     pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
3334     pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
3335     pa_assert(min_latency <= max_latency);
3336 
3337     /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3338     pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3339                max_latency == ABSOLUTE_MAX_LATENCY) ||
3340               (s->flags & PA_SINK_DYNAMIC_LATENCY));
3341 
3342     if (s->thread_info.min_latency == min_latency &&
3343         s->thread_info.max_latency == max_latency)
3344         return;
3345 
3346     s->thread_info.min_latency = min_latency;
3347     s->thread_info.max_latency = max_latency;
3348 
3349     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3350         pa_sink_input *i;
3351         void *state = NULL;
3352 
3353         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3354             if (i->update_sink_latency_range)
3355                 i->update_sink_latency_range(i);
3356     }
3357 
3358     pa_sink_invalidate_requested_latency(s, false);
3359 
3360     pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
3361 }
3362 
3363 /* Called from main thread */
3364 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3365     pa_sink_assert_ref(s);
3366     pa_assert_ctl_context();
3367 
3368     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3369         //pa_assert(latency == 0);
3370         return;
3371     }
3372 
3373     if (latency < ABSOLUTE_MIN_LATENCY)
3374         latency = ABSOLUTE_MIN_LATENCY;
3375 
3376     if (latency > ABSOLUTE_MAX_LATENCY)
3377         latency = ABSOLUTE_MAX_LATENCY;
3378 
3379     if (PA_SINK_IS_LINKED(s->state))
3380         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3381     else
3382         s->thread_info.fixed_latency = latency;
3383 
3384     pa_source_set_fixed_latency(s->monitor_source, latency);
3385 }
3386 
3387 /* Called from main thread */
3388 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3389     pa_usec_t latency;
3390 
3391     pa_sink_assert_ref(s);
3392     pa_assert_ctl_context();
3393 
3394     if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3395         return 0;
3396 
3397     if (PA_SINK_IS_LINKED(s->state))
3398         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3399     else
3400         latency = s->thread_info.fixed_latency;
3401 
3402     return latency;
3403 }
3404 
3405 /* Called from IO thread */
3406 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
3407     pa_sink_assert_ref(s);
3408     pa_sink_assert_io_context(s);
3409 
3410     if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3411         pa_assert(latency == 0);
3412         s->thread_info.fixed_latency = 0;
3413 
3414         if (s->monitor_source)
3415             pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);
3416 
3417         return;
3418     }
3419 
3420     pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
3421     pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
3422 
3423     if (s->thread_info.fixed_latency == latency)
3424         return;
3425 
3426     s->thread_info.fixed_latency = latency;
3427 
3428     if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3429         pa_sink_input *i;
3430         void *state = NULL;
3431 
3432         PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3433             if (i->update_sink_fixed_latency)
3434                 i->update_sink_fixed_latency(i);
3435     }
3436 
3437     pa_sink_invalidate_requested_latency(s, false);
3438 
3439     pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
3440 }
3441 
3442 /* Called from main context */
3443 void pa_sink_set_port_latency_offset(pa_sink *s, int64_t offset) {
3444     pa_sink_assert_ref(s);
3445 
3446     s->port_latency_offset = offset;
3447 
3448     if (PA_SINK_IS_LINKED(s->state))
3449         pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3450     else
3451         s->thread_info.port_latency_offset = offset;
3452 
3453     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_LATENCY_OFFSET_CHANGED], s);
3454 }
3455 
3456 /* Called from main context */
3457 size_t pa_sink_get_max_rewind(pa_sink *s) {
3458     size_t r;
3459     pa_assert_ctl_context();
3460     pa_sink_assert_ref(s);
3461 
3462     if (!PA_SINK_IS_LINKED(s->state))
3463         return s->thread_info.max_rewind;
3464 
3465     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3466 
3467     return r;
3468 }
3469 
3470 /* Called from main context */
3471 size_t pa_sink_get_max_request(pa_sink *s) {
3472     size_t r;
3473     pa_sink_assert_ref(s);
3474     pa_assert_ctl_context();
3475 
3476     if (!PA_SINK_IS_LINKED(s->state))
3477         return s->thread_info.max_request;
3478 
3479     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3480 
3481     return r;
3482 }
3483 
3484 /* Called from main context */
3485 int pa_sink_set_port(pa_sink *s, const char *name, bool save) {
3486     pa_device_port *port;
3487 
3488     pa_sink_assert_ref(s);
3489     pa_assert_ctl_context();
3490 
3491     if (!s->set_port) {
3492         pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3493         return -PA_ERR_NOTIMPLEMENTED;
3494     }
3495 
3496     if (!name)
3497         return -PA_ERR_NOENTITY;
3498 
3499     if (!(port = pa_hashmap_get(s->ports, name)))
3500         return -PA_ERR_NOENTITY;
3501 
3502     if (s->active_port == port) {
3503         s->save_port = s->save_port || save;
3504         return 0;
3505     }
3506 
3507     s->port_changing = true;
3508 
3509     if (s->set_port(s, port) < 0)
3510         return -PA_ERR_NOENTITY;
3511 
3512     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3513 
3514     pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3515 
3516     s->active_port = port;
3517     s->save_port = save;
3518 
3519     pa_sink_set_port_latency_offset(s, s->active_port->latency_offset);
3520 
3521     /* The active port affects the default sink selection. */
3522     pa_core_update_default_sink(s->core);
3523 
3524     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3525 
3526     s->port_changing = false;
3527 
3528     return 0;
3529 }
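
/* Illustrative sketch (hypothetical caller code): protocol or CLI layers
 * usually resolve the sink by name first and then switch its port; the sink
 * and port names below are made up:
 *
 *     pa_sink *sink = pa_namereg_get(core, "alsa_output.pci-0000_00_1f.3.analog-stereo", PA_NAMEREG_SINK);
 *     if (!sink || pa_sink_set_port(sink, "analog-output-headphones", true) < 0)
 *         pa_log_warn("Failed to switch the sink port.");
 */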
3530 
3531 bool pa_device_init_icon(pa_proplist *p, bool is_sink) {
3532     const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3533 
3534     pa_assert(p);
3535 
3536     if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3537         return true;
3538 
3539     if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3540 
3541         if (pa_streq(ff, "microphone"))
3542             t = "audio-input-microphone";
3543         else if (pa_streq(ff, "webcam"))
3544             t = "camera-web";
3545         else if (pa_streq(ff, "computer"))
3546             t = "computer";
3547         else if (pa_streq(ff, "handset"))
3548             t = "phone";
3549         else if (pa_streq(ff, "portable"))
3550             t = "multimedia-player";
3551         else if (pa_streq(ff, "tv"))
3552             t = "video-display";
3553 
3554         /*
3555          * The following icons are not part of the icon naming spec,
3556          * because Rodney Dawes sucks as the maintainer of that spec.
3557          *
3558          * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3559          */
3560         else if (pa_streq(ff, "headset"))
3561             t = "audio-headset";
3562         else if (pa_streq(ff, "headphone"))
3563             t = "audio-headphones";
3564         else if (pa_streq(ff, "speaker"))
3565             t = "audio-speakers";
3566         else if (pa_streq(ff, "hands-free"))
3567             t = "audio-handsfree";
3568     }
3569 
3570     if (!t)
3571         if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3572             if (pa_streq(c, "modem"))
3573                 t = "modem";
3574 
3575     if (!t) {
3576         if (is_sink)
3577             t = "audio-card";
3578         else
3579             t = "audio-input-microphone";
3580     }
3581 
3582     if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3583         if (strstr(profile, "analog"))
3584             s = "-analog";
3585         else if (strstr(profile, "iec958"))
3586             s = "-iec958";
3587         else if (strstr(profile, "hdmi"))
3588             s = "-hdmi";
3589     }
3590 
3591     bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3592 
3593     pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3594 
3595     return true;
3596 }
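
/* Worked example (hypothetical properties): form factor "headphone", profile
 * name "analog-stereo" and bus "usb" combine to the icon name
 * "audio-headphones-analog-usb". */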
3597 
3598 bool pa_device_init_description(pa_proplist *p, pa_card *card) {
3599     const char *s, *d = NULL, *k;
3600     pa_assert(p);
3601 
3602     if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3603         return true;
3604 
3605     if (card)
3606         if ((s = pa_proplist_gets(card->proplist, PA_PROP_DEVICE_DESCRIPTION)))
3607             d = s;
3608 
3609     if (!d)
3610         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3611             if (pa_streq(s, "internal"))
3612                 d = _("Built-in Audio");
3613 
3614     if (!d)
3615         if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3616             if (pa_streq(s, "modem"))
3617                 d = _("Modem");
3618 
3619     if (!d)
3620         d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3621 
3622     if (!d)
3623         return false;
3624 
3625     k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3626 
3627     if (d && k)
3628         pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3629     else if (d)
3630         pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3631 
3632     return true;
3633 }
3634 
3635 bool pa_device_init_intended_roles(pa_proplist *p) {
3636     const char *s;
3637     pa_assert(p);
3638 
3639     if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3640         return true;
3641 
3642     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3643         if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3644             || pa_streq(s, "headset")) {
3645             pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3646             return true;
3647         }
3648 
3649     return false;
3650 }
3651 
3652 unsigned pa_device_init_priority(pa_proplist *p) {
3653     const char *s;
3654     unsigned priority = 0;
3655 
3656     pa_assert(p);
3657 
3658     /* JACK sinks and sources get very high priority so that we'll switch the
3659      * default devices automatically when jackd starts and
3660      * module-jackdbus-detect creates the jack sink and source. */
3661     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_API))) {
3662         if (pa_streq(s, "jack"))
3663             priority += 10000;
3664     }
3665 
3666     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3667 
3668         if (pa_streq(s, "sound"))
3669             priority += 9000;
3670         else if (!pa_streq(s, "modem"))
3671             priority += 1000;
3672     }
3673 
3674     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3675 
3676         if (pa_streq(s, "headphone"))
3677             priority += 900;
3678         else if (pa_streq(s, "hifi"))
3679             priority += 600;
3680         else if (pa_streq(s, "speaker"))
3681             priority += 500;
3682         else if (pa_streq(s, "portable"))
3683             priority += 450;
3684     }
3685 
3686     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3687 
3688         if (pa_streq(s, "bluetooth"))
3689             priority += 50;
3690         else if (pa_streq(s, "usb"))
3691             priority += 40;
3692         else if (pa_streq(s, "pci"))
3693             priority += 30;
3694     }
3695 
3696     if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3697 
3698         if (pa_startswith(s, "analog-")) {
3699             priority += 9;
3700 
3701             /* If an analog device has an intended role of "phone", it probably
3702              * co-exists with another device that is meant for everything else,
3703              * and that other device should have higher priority than the phone
3704              * device. */
3705             if (pa_str_in_list_spaces(pa_proplist_gets(p, PA_PROP_DEVICE_INTENDED_ROLES), "phone"))
3706                 priority -= 1;
3707         }
3708         else if (pa_startswith(s, "iec958-"))
3709             priority += 7;
3710     }
3711 
3712     return priority;
3713 }
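
/* Worked example (hypothetical devices): an analog USB headphone with
 * device.class = "sound", device.form_factor = "headphone", device.bus = "usb"
 * and device.profile.name = "analog-stereo" scores 9000 + 900 + 40 + 9 = 9949,
 * while an HDMI output on a PCI card with no form factor scores
 * 9000 + 30 = 9030, so the headphone wins when devices are ranked by
 * priority. */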
3714 
3715 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3716 
3717 /* Called from the IO thread. */
3718 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3719     pa_sink_volume_change *c;
3720     if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3721         c = pa_xnew(pa_sink_volume_change, 1);
3722 
3723     PA_LLIST_INIT(pa_sink_volume_change, c);
3724     c->at = 0;
3725     pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3726     return c;
3727 }
3728 
3729 /* Called from the IO thread. */
3730 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3731     pa_assert(c);
3732     if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3733         pa_xfree(c);
3734 }
3735 
3736 /* Called from the IO thread. */
3737 void pa_sink_volume_change_push(pa_sink *s) {
3738     pa_sink_volume_change *c = NULL;
3739     pa_sink_volume_change *nc = NULL;
3740     pa_sink_volume_change *pc = NULL;
3741     uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3742 
3743     const char *direction = NULL;
3744 
3745     pa_assert(s);
3746     nc = pa_sink_volume_change_new(s);
3747 
3748     /* NOTE: There are already more different volumes in pa_sink than I can remember.
3749      *       Adding one more volume for the HW would let us get rid of this, but I am
3750      *       trying to survive with the ones we already have. */
3751     pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3752 
3753     if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3754         pa_log_debug("Volume not changing");
3755         pa_sink_volume_change_free(nc);
3756         return;
3757     }
3758 
3759     nc->at = pa_sink_get_latency_within_thread(s, false);
3760     nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3761 
3762     if (s->thread_info.volume_changes_tail) {
3763         for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3764             /* If volume is going up let's do it a bit late. If it is going
3765              * down let's do it a bit early. */
3766             if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3767                 if (nc->at + safety_margin > c->at) {
3768                     nc->at += safety_margin;
3769                     direction = "up";
3770                     break;
3771                 }
3772             }
3773             else if (nc->at - safety_margin > c->at) {
3774                 nc->at -= safety_margin;
3775                 direction = "down";
3776                 break;
3777             }
3778         }
3779     }
3780 
3781     if (c == NULL) {
3782         if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3783             nc->at += safety_margin;
3784             direction = "up";
3785         } else {
3786             nc->at -= safety_margin;
3787             direction = "down";
3788         }
3789         PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3790     }
3791     else {
3792         PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3793     }
3794 
3795     pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3796 
3797     /* We can ignore volume events that came earlier but should happen later than this. */
3798     PA_LLIST_FOREACH_SAFE(c, pc, nc->next) {
3799         pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3800         pa_sink_volume_change_free(c);
3801     }
3802     nc->next = NULL;
3803     s->thread_info.volume_changes_tail = nc;
3804 }
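
/* Worked example (hypothetical numbers): with a sink latency of 20 ms and a
 * safety margin of 8 ms, a volume increase requested "now" is scheduled for
 * now + 20 ms + 8 ms, while a decrease is scheduled for now + 20 ms - 8 ms,
 * matching the "up a bit late, down a bit early" rule applied above. */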
3805 
3806 /* Called from the IO thread. */
3807 static void pa_sink_volume_change_flush(pa_sink *s) {
3808     pa_sink_volume_change *c = s->thread_info.volume_changes;
3809     pa_assert(s);
3810     s->thread_info.volume_changes = NULL;
3811     s->thread_info.volume_changes_tail = NULL;
3812     while (c) {
3813         pa_sink_volume_change *next = c->next;
3814         pa_sink_volume_change_free(c);
3815         c = next;
3816     }
3817 }
3818 
3819 /* Called from the IO thread. */
3820 bool pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3821     pa_usec_t now;
3822     bool ret = false;
3823 
3824     pa_assert(s);
3825 
3826     if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
3827         if (usec_to_next)
3828             *usec_to_next = 0;
3829         return ret;
3830     }
3831 
3832     pa_assert(s->write_volume);
3833 
3834     now = pa_rtclock_now();
3835 
3836     while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3837         pa_sink_volume_change *c = s->thread_info.volume_changes;
3838         PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3839         pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3840                      pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3841         ret = true;
3842         s->thread_info.current_hw_volume = c->hw_volume;
3843         pa_sink_volume_change_free(c);
3844     }
3845 
3846     if (ret)
3847         s->write_volume(s);
3848 
3849     if (s->thread_info.volume_changes) {
3850         if (usec_to_next)
3851             *usec_to_next = s->thread_info.volume_changes->at - now;
3852         if (pa_log_ratelimit(PA_LOG_DEBUG))
3853             pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3854     }
3855     else {
3856         if (usec_to_next)
3857             *usec_to_next = 0;
3858         s->thread_info.volume_changes_tail = NULL;
3859     }
3860     return ret;
3861 }
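
/* Illustrative sketch (hypothetical IO-loop code): a deferred-volume sink
 * implementation would call this from its IO loop and use usec_to_next to
 * re-arm its timer; schedule_wakeup() is a made-up helper:
 *
 *     pa_usec_t usec_to_next;
 *
 *     pa_sink_volume_change_apply(s, &usec_to_next);
 *     if (usec_to_next > 0)
 *         schedule_wakeup(pa_rtclock_now() + usec_to_next);
 */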
3862 
3863 /* Called from the IO thread. */
3864 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
3865     /* All the queued volume events later than current latency are shifted to happen earlier. */
3866     pa_sink_volume_change *c;
3867     pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
3868     pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
3869     pa_usec_t limit = pa_sink_get_latency_within_thread(s, false);
3870 
3871     pa_log_debug("latency = %lld", (long long) limit);
3872     limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3873 
3874     PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
3875         pa_usec_t modified_limit = limit;
3876         if (prev_vol > pa_cvolume_avg(&c->hw_volume))
3877             modified_limit -= s->thread_info.volume_change_safety_margin;
3878         else
3879             modified_limit += s->thread_info.volume_change_safety_margin;
3880         if (c->at > modified_limit) {
3881             c->at -= rewound;
3882             if (c->at < modified_limit)
3883                 c->at = modified_limit;
3884         }
3885         prev_vol = pa_cvolume_avg(&c->hw_volume);
3886     }
3887     pa_sink_volume_change_apply(s, NULL);
3888 }
3889 
3890 /* Called from the main thread */
3891 /* Gets the list of formats supported by the sink. The members and idxset must
3892  * be freed by the caller. */
3893 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3894     pa_idxset *ret;
3895 
3896     pa_assert(s);
3897 
3898     if (s->get_formats) {
3899         /* Sink supports format query, all is good */
3900         ret = s->get_formats(s);
3901     } else {
3902         /* Sink doesn't support format query, so assume it does PCM */
3903         pa_format_info *f = pa_format_info_new();
3904         f->encoding = PA_ENCODING_PCM;
3905 
3906         ret = pa_idxset_new(NULL, NULL);
3907         pa_idxset_put(ret, f, NULL);
3908     }
3909 
3910     return ret;
3911 }
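
/* Illustrative sketch: the caller owns the returned idxset and its members and
 * frees them the same way pa_sink_check_format() does below:
 *
 *     pa_idxset *formats = pa_sink_get_formats(s);
 *     // inspect the formats here
 *     pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
 */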
3912 
3913 /* Called from the main thread */
3914 /* Allows an external source to set what formats a sink supports if the sink
3915  * permits this. The function makes a copy of the formats on success. */
3916 bool pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3917     pa_assert(s);
3918     pa_assert(formats);
3919 
3920     if (s->set_formats)
3921         /* Sink supports setting formats -- let's give it a shot */
3922         return s->set_formats(s, formats);
3923     else
3924         /* Sink doesn't support setting this -- bail out */
3925         return false;
3926 }
3927 
3928 /* Called from the main thread */
3929 /* Checks if the sink can accept this format */
3930 bool pa_sink_check_format(pa_sink *s, pa_format_info *f) {
3931     pa_idxset *formats = NULL;
3932     bool ret = false;
3933 
3934     pa_assert(s);
3935     pa_assert(f);
3936 
3937     formats = pa_sink_get_formats(s);
3938 
3939     if (formats) {
3940         pa_format_info *finfo_device;
3941         uint32_t i;
3942 
3943         PA_IDXSET_FOREACH(finfo_device, formats, i) {
3944             if (pa_format_info_is_compatible(finfo_device, f)) {
3945                 ret = true;
3946                 break;
3947             }
3948         }
3949 
3950         pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3951     }
3952 
3953     return ret;
3954 }
3955 
3956 /* Called from the main thread */
3957 /* Calculates the intersection between formats supported by the sink and
3958  * in_formats, and returns these, in the order of the sink's formats. */
3959 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3960     pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3961     pa_format_info *f_sink, *f_in;
3962     uint32_t i, j;
3963 
3964     pa_assert(s);
3965 
3966     if (!in_formats || pa_idxset_isempty(in_formats))
3967         goto done;
3968 
3969     sink_formats = pa_sink_get_formats(s);
3970 
3971     PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3972         PA_IDXSET_FOREACH(f_in, in_formats, j) {
3973             if (pa_format_info_is_compatible(f_sink, f_in))
3974                 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3975         }
3976     }
3977 
3978 done:
3979     if (sink_formats)
3980         pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3981 
3982     return out_formats;
3983 }
3984 
3985 /* Called from the main thread */
3986 void pa_sink_set_sample_format(pa_sink *s, pa_sample_format_t format) {
3987     pa_sample_format_t old_format;
3988 
3989     pa_assert(s);
3990     pa_assert(pa_sample_format_valid(format));
3991 
3992     old_format = s->sample_spec.format;
3993     if (old_format == format)
3994         return;
3995 
3996     pa_log_info("%s: format: %s -> %s",
3997                 s->name, pa_sample_format_to_string(old_format), pa_sample_format_to_string(format));
3998 
3999     s->sample_spec.format = format;
4000 
4001     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4002 }
4003 
4004 /* Called from the main thread */
4005 void pa_sink_set_sample_rate(pa_sink *s, uint32_t rate) {
4006     uint32_t old_rate;
4007 
4008     pa_assert(s);
4009     pa_assert(pa_sample_rate_valid(rate));
4010 
4011     old_rate = s->sample_spec.rate;
4012     if (old_rate == rate)
4013         return;
4014 
4015     pa_log_info("%s: rate: %u -> %u", s->name, old_rate, rate);
4016 
4017     s->sample_spec.rate = rate;
4018 
4019     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4020 }
4021 
4022 /* Called from the main thread. */
4023 void pa_sink_set_reference_volume_direct(pa_sink *s, const pa_cvolume *volume) {
4024     pa_cvolume old_volume;
4025     char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4026     char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
4027 
4028     pa_assert(s);
4029     pa_assert(volume);
4030 
4031     old_volume = s->reference_volume;
4032 
4033     if (pa_cvolume_equal(volume, &old_volume))
4034         return;
4035 
4036     s->reference_volume = *volume;
4037     pa_log_debug("The reference volume of sink %s changed from %s to %s.", s->name,
4038                  pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
4039                                             s->flags & PA_SINK_DECIBEL_VOLUME),
4040                  pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
4041                                             s->flags & PA_SINK_DECIBEL_VOLUME));
4042 
4043     pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
4044     pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_VOLUME_CHANGED], s);
4045 }
4046 
4047 void pa_sink_move_streams_to_default_sink(pa_core *core, pa_sink *old_sink, bool default_sink_changed) {
4048     pa_sink_input *i;
4049     uint32_t idx;
4050 
4051     pa_assert(core);
4052     pa_assert(old_sink);
4053 
4054     if (core->state == PA_CORE_SHUTDOWN)
4055         return;
4056 
4057     if (core->default_sink == NULL || core->default_sink->unlink_requested)
4058         return;
4059 
4060     if (old_sink == core->default_sink)
4061         return;
4062 
4063     PA_IDXSET_FOREACH(i, old_sink->inputs, idx) {
4064         if (!PA_SINK_INPUT_IS_LINKED(i->state))
4065             continue;
4066 
4067         if (!i->sink)
4068             continue;
4069 
4070         /* Don't move sink-inputs which connect filter sinks to their target sinks */
4071         if (i->origin_sink)
4072             continue;
4073 
4074         /* If default_sink_changed is false, the old sink became unavailable, so all streams must be moved. */
4075         if (pa_safe_streq(old_sink->name, i->preferred_sink) && default_sink_changed)
4076             continue;
4077 
4078         if (!pa_sink_input_may_move_to(i, core->default_sink))
4079             continue;
4080 
4081         if (default_sink_changed)
4082             pa_log_info("The sink input %u \"%s\" is moving to %s due to change of the default sink.",
4083                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4084         else
4085             pa_log_info("The sink input %u \"%s\" is moving to %s, because the old sink became unavailable.",
4086                         i->index, pa_strnull(pa_proplist_gets(i->proplist, PA_PROP_APPLICATION_NAME)), core->default_sink->name);
4087 
4088         pa_sink_input_move_to(i, core->default_sink, false);
4089     }
4090 }
4091