xref: /kernel/linux/linux-6.6/sound/core/pcm_lib.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  Digital Audio (PCM) abstract layer
4 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5 *                   Abramo Bagnara <abramo@alsa-project.org>
6 */
7
8#include <linux/slab.h>
9#include <linux/sched/signal.h>
10#include <linux/time.h>
11#include <linux/math64.h>
12#include <linux/export.h>
13#include <sound/core.h>
14#include <sound/control.h>
15#include <sound/tlv.h>
16#include <sound/info.h>
17#include <sound/pcm.h>
18#include <sound/pcm_params.h>
19#include <sound/timer.h>
20
21#include "pcm_local.h"
22
23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
24#define CREATE_TRACE_POINTS
25#include "pcm_trace.h"
26#else
27#define trace_hwptr(substream, pos, in_interrupt)
28#define trace_xrun(substream)
29#define trace_hw_ptr_error(substream, reason)
30#define trace_applptr(substream, prev, curr)
31#endif
32
33static int fill_silence_frames(struct snd_pcm_substream *substream,
34			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35
36
37static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
38				       snd_pcm_uframes_t ptr,
39				       snd_pcm_uframes_t new_ptr)
40{
41	snd_pcm_sframes_t delta;
42
43	delta = new_ptr - ptr;
44	if (delta == 0)
45		return;
46	if (delta < 0)
47		delta += runtime->boundary;
48	if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
49		runtime->silence_filled -= delta;
50	else
51		runtime->silence_filled = 0;
52	runtime->silence_start = new_ptr;
53}
54
55/*
56 * fill ring buffer with silence
57 * runtime->silence_start: starting pointer to silence area
58 * runtime->silence_filled: size filled with silence
59 * runtime->silence_threshold: threshold from application
60 * runtime->silence_size: maximal size from application
61 *
62 * when runtime->silence_size >= runtime->boundary - fill the processed area with silence immediately
63 */
64void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
65{
66	struct snd_pcm_runtime *runtime = substream->runtime;
67	snd_pcm_uframes_t frames, ofs, transfer;
68	int err;
69
70	if (runtime->silence_size < runtime->boundary) {
71		snd_pcm_sframes_t noise_dist;
72		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
73		update_silence_vars(runtime, runtime->silence_start, appl_ptr);
74		/* initialization outside pointer updates */
75		if (new_hw_ptr == ULONG_MAX)
76			new_hw_ptr = runtime->status->hw_ptr;
77		/* get hw_avail with the boundary crossing */
78		noise_dist = appl_ptr - new_hw_ptr;
79		if (noise_dist < 0)
80			noise_dist += runtime->boundary;
81		/* total noise distance */
82		noise_dist += runtime->silence_filled;
83		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
84			return;
85		frames = runtime->silence_threshold - noise_dist;
86		if (frames > runtime->silence_size)
87			frames = runtime->silence_size;
88	} else {
89		/*
90		 * This filling mode aims at free-running mode (used for example by dmix),
91		 * which doesn't update the application pointer.
92		 */
93		snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
94		if (new_hw_ptr == ULONG_MAX) {
95			/*
96			 * Initialization, fill the whole unused buffer with silence.
97			 *
98			 * Usually, this is entered while stopped, before data is queued,
99			 * so both pointers are expected to be zero.
100			 */
101			snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
102			if (avail < 0)
103				avail += runtime->boundary;
104			/*
105			 * In free-running mode, appl_ptr will be zero even while running,
106			 * so we end up with a huge number. There is no useful way to
107			 * handle this, so we just clear the whole buffer.
108			 */
109			runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
110			runtime->silence_start = hw_ptr;
111		} else {
112			/* Silence the just played area immediately */
113			update_silence_vars(runtime, hw_ptr, new_hw_ptr);
114		}
115		/*
116		 * In this mode, silence_filled actually includes the valid
117		 * sample data from the user.
118		 */
119		frames = runtime->buffer_size - runtime->silence_filled;
120	}
121	if (snd_BUG_ON(frames > runtime->buffer_size))
122		return;
123	if (frames == 0)
124		return;
125	ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
126	do {
127		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
128		err = fill_silence_frames(substream, ofs, transfer);
129		snd_BUG_ON(err < 0);
130		runtime->silence_filled += transfer;
131		frames -= transfer;
132		ofs = 0;
133	} while (frames > 0);
134	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
135}
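/*
 * Worked example of the thresholded mode above (illustrative numbers, not
 * taken from any specific driver):
 *
 *	buffer_size = 8192 frames, silence_threshold = 4096, silence_size = 1024
 *	appl_ptr - hw_ptr = 3000 frames of queued data, silence_filled = 0
 *
 *	noise_dist = 3000 + 0 = 3000 < 4096, so the function tops up the area
 *	beyond appl_ptr with min(4096 - 3000, 1024) = 1024 frames of silence.
 *
 * If silence_size >= boundary instead, the whole unused part of the ring
 * buffer is kept silenced on every hw_ptr update (the dmix-style mode).
 */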
136
137#ifdef CONFIG_SND_DEBUG
138void snd_pcm_debug_name(struct snd_pcm_substream *substream,
139			   char *name, size_t len)
140{
141	snprintf(name, len, "pcmC%dD%d%c:%d",
142		 substream->pcm->card->number,
143		 substream->pcm->device,
144		 substream->stream ? 'c' : 'p',
145		 substream->number);
146}
147EXPORT_SYMBOL(snd_pcm_debug_name);
148#endif
149
150#define XRUN_DEBUG_BASIC	(1<<0)
151#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
152#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
153
154#ifdef CONFIG_SND_PCM_XRUN_DEBUG
155
156#define xrun_debug(substream, mask) \
157			((substream)->pstr->xrun_debug & (mask))
158#else
159#define xrun_debug(substream, mask)	0
160#endif
161
162#define dump_stack_on_xrun(substream) do {			\
163		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
164			dump_stack();				\
165	} while (0)
166
167/* call with stream lock held */
168void __snd_pcm_xrun(struct snd_pcm_substream *substream)
169{
170	struct snd_pcm_runtime *runtime = substream->runtime;
171
172	trace_xrun(substream);
173	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
174		struct timespec64 tstamp;
175
176		snd_pcm_gettime(runtime, &tstamp);
177		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
178		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
179	}
180	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
181	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
182		char name[16];
183		snd_pcm_debug_name(substream, name, sizeof(name));
184		pcm_warn(substream->pcm, "XRUN: %s\n", name);
185		dump_stack_on_xrun(substream);
186	}
187}
188
189#ifdef CONFIG_SND_PCM_XRUN_DEBUG
190#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
191	do {								\
192		trace_hw_ptr_error(substream, reason);	\
193		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
194			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
195					   (in_interrupt) ? 'Q' : 'P', ##args);	\
196			dump_stack_on_xrun(substream);			\
197		}							\
198	} while (0)
199
200#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
201
202#define hw_ptr_error(substream, fmt, args...) do { } while (0)
203
204#endif
205
206int snd_pcm_update_state(struct snd_pcm_substream *substream,
207			 struct snd_pcm_runtime *runtime)
208{
209	snd_pcm_uframes_t avail;
210
211	avail = snd_pcm_avail(substream);
212	if (avail > runtime->avail_max)
213		runtime->avail_max = avail;
214	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
215		if (avail >= runtime->buffer_size) {
216			snd_pcm_drain_done(substream);
217			return -EPIPE;
218		}
219	} else {
220		if (avail >= runtime->stop_threshold) {
221			__snd_pcm_xrun(substream);
222			return -EPIPE;
223		}
224	}
225	if (runtime->twake) {
226		if (avail >= runtime->twake)
227			wake_up(&runtime->tsleep);
228	} else if (avail >= runtime->control->avail_min)
229		wake_up(&runtime->sleep);
230	return 0;
231}
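/*
 * Worked example (illustrative numbers): for a playback stream with
 * buffer_size = 16384 frames and the default stop_threshold = buffer_size,
 * avail == 16384 means the whole buffer is writable again, i.e. the hardware
 * has consumed everything the application queued; the check above then calls
 * __snd_pcm_xrun().  In the DRAINING state the same condition instead means
 * that all queued data has been played, so the drain completes.
 */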
232
233static void update_audio_tstamp(struct snd_pcm_substream *substream,
234				struct timespec64 *curr_tstamp,
235				struct timespec64 *audio_tstamp)
236{
237	struct snd_pcm_runtime *runtime = substream->runtime;
238	u64 audio_frames, audio_nsecs;
239	struct timespec64 driver_tstamp;
240
241	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
242		return;
243
244	if (!(substream->ops->get_time_info) ||
245		(runtime->audio_tstamp_report.actual_type ==
246			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
247
248		/*
249		 * provide the audio timestamp derived from the pointer position;
250		 * add the delay only if requested
251		 */
252
253		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
254
255		if (runtime->audio_tstamp_config.report_delay) {
256			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
257				audio_frames -=  runtime->delay;
258			else
259				audio_frames +=  runtime->delay;
260		}
261		audio_nsecs = div_u64(audio_frames * 1000000000LL,
262				runtime->rate);
263		*audio_tstamp = ns_to_timespec64(audio_nsecs);
264	}
265
266	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
267	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
268		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
269		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
270		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
271		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
272	}
273
274
275	/*
276	 * re-take a driver timestamp to let apps detect if the reference tstamp
277	 * read by low-level hardware was provided with a delay
278	 */
279	snd_pcm_gettime(substream->runtime, &driver_tstamp);
280	runtime->driver_tstamp = driver_tstamp;
281}
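/*
 * The default audio timestamp above is simply the frame count converted to
 * nanoseconds.  Worked example (illustrative numbers): at rate = 48000 Hz,
 * audio_frames = 120000 gives
 *
 *	audio_nsecs = 120000 * 1000000000 / 48000 = 2500000000 ns = 2.5 s
 *
 * i.e. two and a half seconds' worth of samples have passed through the
 * pointer since the stream started (hw_ptr_wrap accounts for boundary wraps).
 */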
282
283static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
284				  unsigned int in_interrupt)
285{
286	struct snd_pcm_runtime *runtime = substream->runtime;
287	snd_pcm_uframes_t pos;
288	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
289	snd_pcm_sframes_t hdelta, delta;
290	unsigned long jdelta;
291	unsigned long curr_jiffies;
292	struct timespec64 curr_tstamp;
293	struct timespec64 audio_tstamp;
294	int crossed_boundary = 0;
295
296	old_hw_ptr = runtime->status->hw_ptr;
297
298	/*
299	 * group pointer, time and jiffies reads to allow for more
300	 * accurate correlations/corrections.
301	 * The values are stored at the end of this routine after
302	 * corrections for hw_ptr position
303	 */
304	pos = substream->ops->pointer(substream);
305	curr_jiffies = jiffies;
306	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
307		if ((substream->ops->get_time_info) &&
308			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
309			substream->ops->get_time_info(substream, &curr_tstamp,
310						&audio_tstamp,
311						&runtime->audio_tstamp_config,
312						&runtime->audio_tstamp_report);
313
314			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
315			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
316				snd_pcm_gettime(runtime, &curr_tstamp);
317		} else
318			snd_pcm_gettime(runtime, &curr_tstamp);
319	}
320
321	if (pos == SNDRV_PCM_POS_XRUN) {
322		__snd_pcm_xrun(substream);
323		return -EPIPE;
324	}
325	if (pos >= runtime->buffer_size) {
326		if (printk_ratelimit()) {
327			char name[16];
328			snd_pcm_debug_name(substream, name, sizeof(name));
329			pcm_err(substream->pcm,
330				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
331				name, pos, runtime->buffer_size,
332				runtime->period_size);
333		}
334		pos = 0;
335	}
336	pos -= pos % runtime->min_align;
337	trace_hwptr(substream, pos, in_interrupt);
338	hw_base = runtime->hw_ptr_base;
339	new_hw_ptr = hw_base + pos;
340	if (in_interrupt) {
341		/* we know that one period was processed */
342		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
343		delta = runtime->hw_ptr_interrupt + runtime->period_size;
344		if (delta > new_hw_ptr) {
345			/* check for double acknowledged interrupts */
346			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
347			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
348				hw_base += runtime->buffer_size;
349				if (hw_base >= runtime->boundary) {
350					hw_base = 0;
351					crossed_boundary++;
352				}
353				new_hw_ptr = hw_base + pos;
354				goto __delta;
355			}
356		}
357	}
358	/* new_hw_ptr might be lower than old_hw_ptr when the */
359	/* pointer crosses the end of the ring buffer */
360	if (new_hw_ptr < old_hw_ptr) {
361		hw_base += runtime->buffer_size;
362		if (hw_base >= runtime->boundary) {
363			hw_base = 0;
364			crossed_boundary++;
365		}
366		new_hw_ptr = hw_base + pos;
367	}
368      __delta:
369	delta = new_hw_ptr - old_hw_ptr;
370	if (delta < 0)
371		delta += runtime->boundary;
372
373	if (runtime->no_period_wakeup) {
374		snd_pcm_sframes_t xrun_threshold;
375		/*
376		 * Without regular period interrupts, we have to check
377		 * the elapsed time to detect xruns.
378		 */
379		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
380		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
381			goto no_delta_check;
382		hdelta = jdelta - delta * HZ / runtime->rate;
383		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
384		while (hdelta > xrun_threshold) {
385			delta += runtime->buffer_size;
386			hw_base += runtime->buffer_size;
387			if (hw_base >= runtime->boundary) {
388				hw_base = 0;
389				crossed_boundary++;
390			}
391			new_hw_ptr = hw_base + pos;
392			hdelta -= runtime->hw_ptr_buffer_jiffies;
393		}
394		goto no_delta_check;
395	}
396
397	/* something must be really wrong */
398	if (delta >= runtime->buffer_size + runtime->period_size) {
399		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
400			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
401			     substream->stream, (long)pos,
402			     (long)new_hw_ptr, (long)old_hw_ptr);
403		return 0;
404	}
405
406	/* Do jiffies check only in xrun_debug mode */
407	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
408		goto no_jiffies_check;
409
410	/* Skip the jiffies check for hardware with the BATCH flag.
411	 * Such hardware usually just increases the position at each IRQ,
412	 * so it can't report any strange position.
413	 */
414	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
415		goto no_jiffies_check;
416	hdelta = delta;
417	if (hdelta < runtime->delay)
418		goto no_jiffies_check;
419	hdelta -= runtime->delay;
420	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
421	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
422		delta = jdelta /
423			(((runtime->period_size * HZ) / runtime->rate)
424								+ HZ/100);
425		/* move new_hw_ptr according to jiffies, not the pos variable */
426		new_hw_ptr = old_hw_ptr;
427		hw_base = delta;
428		/* use loop to avoid checks for delta overflows */
429		/* the delta value is small or zero in most cases */
430		while (delta > 0) {
431			new_hw_ptr += runtime->period_size;
432			if (new_hw_ptr >= runtime->boundary) {
433				new_hw_ptr -= runtime->boundary;
434				crossed_boundary--;
435			}
436			delta--;
437		}
438		/* align hw_base to buffer_size */
439		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
440			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
441			     (long)pos, (long)hdelta,
442			     (long)runtime->period_size, jdelta,
443			     ((hdelta * HZ) / runtime->rate), hw_base,
444			     (unsigned long)old_hw_ptr,
445			     (unsigned long)new_hw_ptr);
446		/* reset values to proper state */
447		delta = 0;
448		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
449	}
450 no_jiffies_check:
451	if (delta > runtime->period_size + runtime->period_size / 2) {
452		hw_ptr_error(substream, in_interrupt,
453			     "Lost interrupts?",
454			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
455			     substream->stream, (long)delta,
456			     (long)new_hw_ptr,
457			     (long)old_hw_ptr);
458	}
459
460 no_delta_check:
461	if (runtime->status->hw_ptr == new_hw_ptr) {
462		runtime->hw_ptr_jiffies = curr_jiffies;
463		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
464		return 0;
465	}
466
467	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
468	    runtime->silence_size > 0)
469		snd_pcm_playback_silence(substream, new_hw_ptr);
470
471	if (in_interrupt) {
472		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
473		if (delta < 0)
474			delta += runtime->boundary;
475		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
476		runtime->hw_ptr_interrupt += delta;
477		if (runtime->hw_ptr_interrupt >= runtime->boundary)
478			runtime->hw_ptr_interrupt -= runtime->boundary;
479	}
480	runtime->hw_ptr_base = hw_base;
481	runtime->status->hw_ptr = new_hw_ptr;
482	runtime->hw_ptr_jiffies = curr_jiffies;
483	if (crossed_boundary) {
484		snd_BUG_ON(crossed_boundary != 1);
485		runtime->hw_ptr_wrap += runtime->boundary;
486	}
487
488	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
489
490	return snd_pcm_update_state(substream, runtime);
491}
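/*
 * Worked example of the hw_base/pos bookkeeping above (illustrative numbers):
 * with buffer_size = 1024 frames, the driver's .pointer callback returns a
 * value in [0, 1024).  If old_hw_ptr = 5100 (hw_base = 4096, pos was 1004)
 * and the new pos reads back as 12, then new_hw_ptr = 4096 + 12 = 4108 is
 * smaller than old_hw_ptr, so hw_base is advanced by one buffer_size to 5120
 * and new_hw_ptr becomes 5132; delta = 32 frames were consumed.  hw_base is
 * reset to 0 (and hw_ptr_wrap bumped) when it reaches runtime->boundary.
 */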
492
493/* CAUTION: call it with irq disabled */
494int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
495{
496	return snd_pcm_update_hw_ptr0(substream, 0);
497}
498
499/**
500 * snd_pcm_set_ops - set the PCM operators
501 * @pcm: the pcm instance
502 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
503 * @ops: the operator table
504 *
505 * Sets the given PCM operators to the pcm instance.
506 */
507void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
508		     const struct snd_pcm_ops *ops)
509{
510	struct snd_pcm_str *stream = &pcm->streams[direction];
511	struct snd_pcm_substream *substream;
512
513	for (substream = stream->substream; substream != NULL; substream = substream->next)
514		substream->ops = ops;
515}
516EXPORT_SYMBOL(snd_pcm_set_ops);
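/*
 * Typical driver-side usage, sketched with hypothetical names (foo_*); the
 * real callback set depends on the driver:
 *
 *	static const struct snd_pcm_ops foo_playback_ops = {
 *		.open		= foo_pcm_open,
 *		.hw_params	= foo_pcm_hw_params,
 *		.trigger	= foo_pcm_trigger,
 *		.pointer	= foo_pcm_pointer,
 *	};
 *
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &foo_playback_ops);
 *
 * This is normally done right after snd_pcm_new() in the driver's probe code,
 * once per stream direction.
 */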
517
518/**
519 * snd_pcm_set_sync - set the PCM sync id
520 * @substream: the pcm substream
521 *
522 * Sets the PCM sync identifier for the card.
523 */
524void snd_pcm_set_sync(struct snd_pcm_substream *substream)
525{
526	struct snd_pcm_runtime *runtime = substream->runtime;
527
528	runtime->sync.id32[0] = substream->pcm->card->number;
529	runtime->sync.id32[1] = -1;
530	runtime->sync.id32[2] = -1;
531	runtime->sync.id32[3] = -1;
532}
533EXPORT_SYMBOL(snd_pcm_set_sync);
534
535/*
536 *  Standard ioctl routine
537 */
538
539static inline unsigned int div32(unsigned int a, unsigned int b,
540				 unsigned int *r)
541{
542	if (b == 0) {
543		*r = 0;
544		return UINT_MAX;
545	}
546	*r = a % b;
547	return a / b;
548}
549
550static inline unsigned int div_down(unsigned int a, unsigned int b)
551{
552	if (b == 0)
553		return UINT_MAX;
554	return a / b;
555}
556
557static inline unsigned int div_up(unsigned int a, unsigned int b)
558{
559	unsigned int r;
560	unsigned int q;
561	if (b == 0)
562		return UINT_MAX;
563	q = div32(a, b, &r);
564	if (r)
565		++q;
566	return q;
567}
568
569static inline unsigned int mul(unsigned int a, unsigned int b)
570{
571	if (a == 0)
572		return 0;
573	if (div_down(UINT_MAX, a) < b)
574		return UINT_MAX;
575	return a * b;
576}
577
578static inline unsigned int muldiv32(unsigned int a, unsigned int b,
579				    unsigned int c, unsigned int *r)
580{
581	u_int64_t n = (u_int64_t) a * b;
582	if (c == 0) {
583		*r = 0;
584		return UINT_MAX;
585	}
586	n = div_u64_rem(n, c, r);
587	if (n >= UINT_MAX) {
588		*r = 0;
589		return UINT_MAX;
590	}
591	return n;
592}
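/*
 * muldiv32() computes a * b / c with a 64-bit intermediate so the product
 * cannot overflow.  Worked example (illustrative numbers): a = 192000,
 * b = 65536, c = 1000 gives a * b = 12582912000, which does not fit in
 * 32 bits; with the u64 intermediate the exact result 12582912 (remainder 0)
 * is returned instead of a truncated value.
 */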
593
594/**
595 * snd_interval_refine - refine the interval value of configurator
596 * @i: the interval value to refine
597 * @v: the interval value to refer to
598 *
599 * Refines the interval value with the reference value.
600 * The interval is changed to the range satisfying both intervals.
601 * The interval status (min, max, integer, etc.) is evaluated.
602 *
603 * Return: Positive if the value is changed, zero if it's not changed, or a
604 * negative error code.
605 */
606int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
607{
608	int changed = 0;
609	if (snd_BUG_ON(snd_interval_empty(i)))
610		return -EINVAL;
611	if (i->min < v->min) {
612		i->min = v->min;
613		i->openmin = v->openmin;
614		changed = 1;
615	} else if (i->min == v->min && !i->openmin && v->openmin) {
616		i->openmin = 1;
617		changed = 1;
618	}
619	if (i->max > v->max) {
620		i->max = v->max;
621		i->openmax = v->openmax;
622		changed = 1;
623	} else if (i->max == v->max && !i->openmax && v->openmax) {
624		i->openmax = 1;
625		changed = 1;
626	}
627	if (!i->integer && v->integer) {
628		i->integer = 1;
629		changed = 1;
630	}
631	if (i->integer) {
632		if (i->openmin) {
633			i->min++;
634			i->openmin = 0;
635		}
636		if (i->openmax) {
637			i->max--;
638			i->openmax = 0;
639		}
640	} else if (!i->openmin && !i->openmax && i->min == i->max)
641		i->integer = 1;
642	if (snd_interval_checkempty(i)) {
643		snd_interval_none(i);
644		return -EINVAL;
645	}
646	return changed;
647}
648EXPORT_SYMBOL(snd_interval_refine);
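/*
 * Worked example (illustrative values): refining i = [8000, 48000] with
 * v = [44100, 192000] narrows i to [44100, 48000] and returns 1 (changed);
 * refining the result with v again returns 0.  If the two ranges do not
 * overlap, the interval becomes empty and -EINVAL is returned.
 */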
649
650static int snd_interval_refine_first(struct snd_interval *i)
651{
652	const unsigned int last_max = i->max;
653
654	if (snd_BUG_ON(snd_interval_empty(i)))
655		return -EINVAL;
656	if (snd_interval_single(i))
657		return 0;
658	i->max = i->min;
659	if (i->openmin)
660		i->max++;
661	/* only exclude max value if also excluded before refine */
662	i->openmax = (i->openmax && i->max >= last_max);
663	return 1;
664}
665
666static int snd_interval_refine_last(struct snd_interval *i)
667{
668	const unsigned int last_min = i->min;
669
670	if (snd_BUG_ON(snd_interval_empty(i)))
671		return -EINVAL;
672	if (snd_interval_single(i))
673		return 0;
674	i->min = i->max;
675	if (i->openmax)
676		i->min--;
677	/* only exclude min value if also excluded before refine */
678	i->openmin = (i->openmin && i->min <= last_min);
679	return 1;
680}
681
682void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
683{
684	if (a->empty || b->empty) {
685		snd_interval_none(c);
686		return;
687	}
688	c->empty = 0;
689	c->min = mul(a->min, b->min);
690	c->openmin = (a->openmin || b->openmin);
691	c->max = mul(a->max,  b->max);
692	c->openmax = (a->openmax || b->openmax);
693	c->integer = (a->integer && b->integer);
694}
695
696/**
697 * snd_interval_div - refine the interval value with division
698 * @a: dividend
699 * @b: divisor
700 * @c: quotient
701 *
702 * c = a / b
703 *
704 * The result is stored in @c.
705 */
706void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
707{
708	unsigned int r;
709	if (a->empty || b->empty) {
710		snd_interval_none(c);
711		return;
712	}
713	c->empty = 0;
714	c->min = div32(a->min, b->max, &r);
715	c->openmin = (r || a->openmin || b->openmax);
716	if (b->min > 0) {
717		c->max = div32(a->max, b->min, &r);
718		if (r) {
719			c->max++;
720			c->openmax = 1;
721		} else
722			c->openmax = (a->openmax || b->openmin);
723	} else {
724		c->max = UINT_MAX;
725		c->openmax = 0;
726	}
727	c->integer = 0;
728}
729
730/**
731 * snd_interval_muldivk - refine the interval value
732 * @a: dividend 1
733 * @b: dividend 2
734 * @k: divisor (as integer)
735 * @c: result
736 *
737 * c = a * b / k
738 *
739 * The result is stored in @c.
740 */
741void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
742		      unsigned int k, struct snd_interval *c)
743{
744	unsigned int r;
745	if (a->empty || b->empty) {
746		snd_interval_none(c);
747		return;
748	}
749	c->empty = 0;
750	c->min = muldiv32(a->min, b->min, k, &r);
751	c->openmin = (r || a->openmin || b->openmin);
752	c->max = muldiv32(a->max, b->max, k, &r);
753	if (r) {
754		c->max++;
755		c->openmax = 1;
756	} else
757		c->openmax = (a->openmax || b->openmax);
758	c->integer = 0;
759}
760
761/**
762 * snd_interval_mulkdiv - refine the interval value
763 * @a: dividend 1
764 * @k: dividend 2 (as integer)
765 * @b: divisor
766 * @c: result
767 *
768 * c = a * k / b
769 *
770 * The result is stored in @c.
771 */
772void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
773		      const struct snd_interval *b, struct snd_interval *c)
774{
775	unsigned int r;
776	if (a->empty || b->empty) {
777		snd_interval_none(c);
778		return;
779	}
780	c->empty = 0;
781	c->min = muldiv32(a->min, k, b->max, &r);
782	c->openmin = (r || a->openmin || b->openmax);
783	if (b->min > 0) {
784		c->max = muldiv32(a->max, k, b->min, &r);
785		if (r) {
786			c->max++;
787			c->openmax = 1;
788		} else
789			c->openmax = (a->openmax || b->openmin);
790	} else {
791		c->max = UINT_MAX;
792		c->openmax = 0;
793	}
794	c->integer = 0;
795}
796
797/* ---- */
798
799
800/**
801 * snd_interval_ratnum - refine the interval value
802 * @i: interval to refine
803 * @rats_count: number of ratnum_t
804 * @rats: ratnum_t array
805 * @nump: pointer to store the resultant numerator
806 * @denp: pointer to store the resultant denominator
807 *
808 * Return: Positive if the value is changed, zero if it's not changed, or a
809 * negative error code.
810 */
811int snd_interval_ratnum(struct snd_interval *i,
812			unsigned int rats_count, const struct snd_ratnum *rats,
813			unsigned int *nump, unsigned int *denp)
814{
815	unsigned int best_num, best_den;
816	int best_diff;
817	unsigned int k;
818	struct snd_interval t;
819	int err;
820	unsigned int result_num, result_den;
821	int result_diff;
822
823	best_num = best_den = best_diff = 0;
824	for (k = 0; k < rats_count; ++k) {
825		unsigned int num = rats[k].num;
826		unsigned int den;
827		unsigned int q = i->min;
828		int diff;
829		if (q == 0)
830			q = 1;
831		den = div_up(num, q);
832		if (den < rats[k].den_min)
833			continue;
834		if (den > rats[k].den_max)
835			den = rats[k].den_max;
836		else {
837			unsigned int r;
838			r = (den - rats[k].den_min) % rats[k].den_step;
839			if (r != 0)
840				den -= r;
841		}
842		diff = num - q * den;
843		if (diff < 0)
844			diff = -diff;
845		if (best_num == 0 ||
846		    diff * best_den < best_diff * den) {
847			best_diff = diff;
848			best_den = den;
849			best_num = num;
850		}
851	}
852	if (best_den == 0) {
853		i->empty = 1;
854		return -EINVAL;
855	}
856	t.min = div_down(best_num, best_den);
857	t.openmin = !!(best_num % best_den);
858
859	result_num = best_num;
860	result_diff = best_diff;
861	result_den = best_den;
862	best_num = best_den = best_diff = 0;
863	for (k = 0; k < rats_count; ++k) {
864		unsigned int num = rats[k].num;
865		unsigned int den;
866		unsigned int q = i->max;
867		int diff;
868		if (q == 0) {
869			i->empty = 1;
870			return -EINVAL;
871		}
872		den = div_down(num, q);
873		if (den > rats[k].den_max)
874			continue;
875		if (den < rats[k].den_min)
876			den = rats[k].den_min;
877		else {
878			unsigned int r;
879			r = (den - rats[k].den_min) % rats[k].den_step;
880			if (r != 0)
881				den += rats[k].den_step - r;
882		}
883		diff = q * den - num;
884		if (diff < 0)
885			diff = -diff;
886		if (best_num == 0 ||
887		    diff * best_den < best_diff * den) {
888			best_diff = diff;
889			best_den = den;
890			best_num = num;
891		}
892	}
893	if (best_den == 0) {
894		i->empty = 1;
895		return -EINVAL;
896	}
897	t.max = div_up(best_num, best_den);
898	t.openmax = !!(best_num % best_den);
899	t.integer = 0;
900	err = snd_interval_refine(i, &t);
901	if (err < 0)
902		return err;
903
904	if (snd_interval_single(i)) {
905		if (best_diff * result_den < result_diff * best_den) {
906			result_num = best_num;
907			result_den = best_den;
908		}
909		if (nump)
910			*nump = result_num;
911		if (denp)
912			*denp = result_den;
913	}
914	return err;
915}
916EXPORT_SYMBOL(snd_interval_ratnum);
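/*
 * Worked example (illustrative values): a codec whose rate is derived as
 * num/den from a fixed master clock, e.g. num = 24576000 Hz with integer
 * dividers den in [256, 1024] and den_step = 256.  Refining the rate
 * interval [44100, 48000] against this ratnum selects den = 512, i.e.
 * 24576000 / 512 = 48000 Hz, and reports num/den through @nump/@denp once
 * the interval has collapsed to a single value.
 */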
917
918/**
919 * snd_interval_ratden - refine the interval value
920 * @i: interval to refine
921 * @rats_count: number of struct ratden
922 * @rats: struct ratden array
923 * @nump: pointer to store the resultant numerator
924 * @denp: pointer to store the resultant denominator
925 *
926 * Return: Positive if the value is changed, zero if it's not changed, or a
927 * negative error code.
928 */
929static int snd_interval_ratden(struct snd_interval *i,
930			       unsigned int rats_count,
931			       const struct snd_ratden *rats,
932			       unsigned int *nump, unsigned int *denp)
933{
934	unsigned int best_num, best_diff, best_den;
935	unsigned int k;
936	struct snd_interval t;
937	int err;
938
939	best_num = best_den = best_diff = 0;
940	for (k = 0; k < rats_count; ++k) {
941		unsigned int num;
942		unsigned int den = rats[k].den;
943		unsigned int q = i->min;
944		int diff;
945		num = mul(q, den);
946		if (num > rats[k].num_max)
947			continue;
948		if (num < rats[k].num_min)
949			num = rats[k].num_max;
950		else {
951			unsigned int r;
952			r = (num - rats[k].num_min) % rats[k].num_step;
953			if (r != 0)
954				num += rats[k].num_step - r;
955		}
956		diff = num - q * den;
957		if (best_num == 0 ||
958		    diff * best_den < best_diff * den) {
959			best_diff = diff;
960			best_den = den;
961			best_num = num;
962		}
963	}
964	if (best_den == 0) {
965		i->empty = 1;
966		return -EINVAL;
967	}
968	t.min = div_down(best_num, best_den);
969	t.openmin = !!(best_num % best_den);
970
971	best_num = best_den = best_diff = 0;
972	for (k = 0; k < rats_count; ++k) {
973		unsigned int num;
974		unsigned int den = rats[k].den;
975		unsigned int q = i->max;
976		int diff;
977		num = mul(q, den);
978		if (num < rats[k].num_min)
979			continue;
980		if (num > rats[k].num_max)
981			num = rats[k].num_max;
982		else {
983			unsigned int r;
984			r = (num - rats[k].num_min) % rats[k].num_step;
985			if (r != 0)
986				num -= r;
987		}
988		diff = q * den - num;
989		if (best_num == 0 ||
990		    diff * best_den < best_diff * den) {
991			best_diff = diff;
992			best_den = den;
993			best_num = num;
994		}
995	}
996	if (best_den == 0) {
997		i->empty = 1;
998		return -EINVAL;
999	}
1000	t.max = div_up(best_num, best_den);
1001	t.openmax = !!(best_num % best_den);
1002	t.integer = 0;
1003	err = snd_interval_refine(i, &t);
1004	if (err < 0)
1005		return err;
1006
1007	if (snd_interval_single(i)) {
1008		if (nump)
1009			*nump = best_num;
1010		if (denp)
1011			*denp = best_den;
1012	}
1013	return err;
1014}
1015
1016/**
1017 * snd_interval_list - refine the interval value from the list
1018 * @i: the interval value to refine
1019 * @count: the number of elements in the list
1020 * @list: the value list
1021 * @mask: the bit-mask to evaluate
1022 *
1023 * Refines the interval value from the list.
1024 * When mask is non-zero, only the elements corresponding to bit 1 are
1025 * evaluated.
1026 *
1027 * Return: Positive if the value is changed, zero if it's not changed, or a
1028 * negative error code.
1029 */
1030int snd_interval_list(struct snd_interval *i, unsigned int count,
1031		      const unsigned int *list, unsigned int mask)
1032{
1033	unsigned int k;
1034	struct snd_interval list_range;
1035
1036	if (!count) {
1037		i->empty = 1;
1038		return -EINVAL;
1039	}
1040	snd_interval_any(&list_range);
1041	list_range.min = UINT_MAX;
1042	list_range.max = 0;
1043	for (k = 0; k < count; k++) {
1044		if (mask && !(mask & (1 << k)))
1045			continue;
1046		if (!snd_interval_test(i, list[k]))
1047			continue;
1048		list_range.min = min(list_range.min, list[k]);
1049		list_range.max = max(list_range.max, list[k]);
1050	}
1051	return snd_interval_refine(i, &list_range);
1052}
1053EXPORT_SYMBOL(snd_interval_list);
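/*
 * Worked example (illustrative values): with list = { 32000, 44100, 48000 }
 * and mask = 0, refining the interval [40000, 50000] keeps only the listed
 * values inside the range, i.e. the interval becomes [44100, 48000].  If no
 * listed value falls inside the interval, it becomes empty and -EINVAL is
 * returned.
 */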
1054
1055/**
1056 * snd_interval_ranges - refine the interval value from the list of ranges
1057 * @i: the interval value to refine
1058 * @count: the number of elements in the list of ranges
1059 * @ranges: the ranges list
1060 * @mask: the bit-mask to evaluate
1061 *
1062 * Refines the interval value from the list of ranges.
1063 * When mask is non-zero, only the elements corresponding to bit 1 are
1064 * evaluated.
1065 *
1066 * Return: Positive if the value is changed, zero if it's not changed, or a
1067 * negative error code.
1068 */
1069int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1070			const struct snd_interval *ranges, unsigned int mask)
1071{
1072	unsigned int k;
1073	struct snd_interval range_union;
1074	struct snd_interval range;
1075
1076	if (!count) {
1077		snd_interval_none(i);
1078		return -EINVAL;
1079	}
1080	snd_interval_any(&range_union);
1081	range_union.min = UINT_MAX;
1082	range_union.max = 0;
1083	for (k = 0; k < count; k++) {
1084		if (mask && !(mask & (1 << k)))
1085			continue;
1086		snd_interval_copy(&range, &ranges[k]);
1087		if (snd_interval_refine(&range, i) < 0)
1088			continue;
1089		if (snd_interval_empty(&range))
1090			continue;
1091
1092		if (range.min < range_union.min) {
1093			range_union.min = range.min;
1094			range_union.openmin = 1;
1095		}
1096		if (range.min == range_union.min && !range.openmin)
1097			range_union.openmin = 0;
1098		if (range.max > range_union.max) {
1099			range_union.max = range.max;
1100			range_union.openmax = 1;
1101		}
1102		if (range.max == range_union.max && !range.openmax)
1103			range_union.openmax = 0;
1104	}
1105	return snd_interval_refine(i, &range_union);
1106}
1107EXPORT_SYMBOL(snd_interval_ranges);
1108
1109static int snd_interval_step(struct snd_interval *i, unsigned int step)
1110{
1111	unsigned int n;
1112	int changed = 0;
1113	n = i->min % step;
1114	if (n != 0 || i->openmin) {
1115		i->min += step - n;
1116		i->openmin = 0;
1117		changed = 1;
1118	}
1119	n = i->max % step;
1120	if (n != 0 || i->openmax) {
1121		i->max -= n;
1122		i->openmax = 0;
1123		changed = 1;
1124	}
1125	if (snd_interval_checkempty(i)) {
1126		i->empty = 1;
1127		return -EINVAL;
1128	}
1129	return changed;
1130}
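/*
 * Worked example (illustrative values): snd_interval_step() with step = 64 on
 * the closed interval [100, 1000] rounds the minimum up to 128 and the
 * maximum down to 960, returning 1; if min and max cross after rounding, the
 * interval is marked empty and -EINVAL is returned.
 */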
1131
1132/* Info constraints helpers */
1133
1134/**
1135 * snd_pcm_hw_rule_add - add the hw-constraint rule
1136 * @runtime: the pcm runtime instance
1137 * @cond: condition bits
1138 * @var: the variable to evaluate
1139 * @func: the evaluation function
1140 * @private: the private data pointer passed to function
1141 * @dep: the dependent variables
1142 *
1143 * Return: Zero if successful, or a negative error code on failure.
1144 */
1145int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1146			int var,
1147			snd_pcm_hw_rule_func_t func, void *private,
1148			int dep, ...)
1149{
1150	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1151	struct snd_pcm_hw_rule *c;
1152	unsigned int k;
1153	va_list args;
1154	va_start(args, dep);
1155	if (constrs->rules_num >= constrs->rules_all) {
1156		struct snd_pcm_hw_rule *new;
1157		unsigned int new_rules = constrs->rules_all + 16;
1158		new = krealloc_array(constrs->rules, new_rules,
1159				     sizeof(*c), GFP_KERNEL);
1160		if (!new) {
1161			va_end(args);
1162			return -ENOMEM;
1163		}
1164		constrs->rules = new;
1165		constrs->rules_all = new_rules;
1166	}
1167	c = &constrs->rules[constrs->rules_num];
1168	c->cond = cond;
1169	c->func = func;
1170	c->var = var;
1171	c->private = private;
1172	k = 0;
1173	while (1) {
1174		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1175			va_end(args);
1176			return -EINVAL;
1177		}
1178		c->deps[k++] = dep;
1179		if (dep < 0)
1180			break;
1181		dep = va_arg(args, int);
1182	}
1183	constrs->rules_num++;
1184	va_end(args);
1185	return 0;
1186}
1187EXPORT_SYMBOL(snd_pcm_hw_rule_add);
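/*
 * Typical driver-side usage, sketched with hypothetical names (foo_*): a
 * custom rule that limits the channel count to 2 whenever the rate is above
 * 96 kHz, added from the driver's .open callback:
 *
 *	static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					     struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *
 *		if (r->min > 96000)
 *			return snd_interval_refine(c, &t);
 *		return 0;
 *	}
 *
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *			    foo_rule_channels_by_rate, NULL,
 *			    SNDRV_PCM_HW_PARAM_RATE, -1);
 */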
1188
1189/**
1190 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1191 * @runtime: PCM runtime instance
1192 * @var: hw_params variable to apply the mask
1193 * @mask: the bitmap mask
1194 *
1195 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1196 *
1197 * Return: Zero if successful, or a negative error code on failure.
1198 */
1199int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1200			       u_int32_t mask)
1201{
1202	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1203	struct snd_mask *maskp = constrs_mask(constrs, var);
1204	*maskp->bits &= mask;
1205	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1206	if (*maskp->bits == 0)
1207		return -EINVAL;
1208	return 0;
1209}
1210
1211/**
1212 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1213 * @runtime: PCM runtime instance
1214 * @var: hw_params variable to apply the mask
1215 * @mask: the 64bit bitmap mask
1216 *
1217 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1218 *
1219 * Return: Zero if successful, or a negative error code on failure.
1220 */
1221int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1222				 u_int64_t mask)
1223{
1224	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1225	struct snd_mask *maskp = constrs_mask(constrs, var);
1226	maskp->bits[0] &= (u_int32_t)mask;
1227	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1228	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1229	if (! maskp->bits[0] && ! maskp->bits[1])
1230		return -EINVAL;
1231	return 0;
1232}
1233EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1234
1235/**
1236 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1237 * @runtime: PCM runtime instance
1238 * @var: hw_params variable to apply the integer constraint
1239 *
1240 * Apply the constraint of integer to an interval parameter.
1241 *
1242 * Return: Positive if the value is changed, zero if it's not changed, or a
1243 * negative error code.
1244 */
1245int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1246{
1247	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1248	return snd_interval_setinteger(constrs_interval(constrs, var));
1249}
1250EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
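/*
 * Common driver-side usage (from a driver's .open callback): constrain the
 * number of periods to an integer so that the buffer is always an exact
 * multiple of the period size:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 */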
1251
1252/**
1253 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1254 * @runtime: PCM runtime instance
1255 * @var: hw_params variable to apply the range
1256 * @min: the minimal value
1257 * @max: the maximal value
1258 *
1259 * Apply the min/max range constraint to an interval parameter.
1260 *
1261 * Return: Positive if the value is changed, zero if it's not changed, or a
1262 * negative error code.
1263 */
1264int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1265				 unsigned int min, unsigned int max)
1266{
1267	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268	struct snd_interval t;
1269	t.min = min;
1270	t.max = max;
1271	t.openmin = t.openmax = 0;
1272	t.integer = 0;
1273	return snd_interval_refine(constrs_interval(constrs, var), &t);
1274}
1275EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
1276
1277static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1278				struct snd_pcm_hw_rule *rule)
1279{
1280	struct snd_pcm_hw_constraint_list *list = rule->private;
1281	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1282}
1283
1284
1285/**
1286 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1287 * @runtime: PCM runtime instance
1288 * @cond: condition bits
1289 * @var: hw_params variable to apply the list constraint
1290 * @l: list
1291 *
1292 * Apply the list of constraints to an interval parameter.
1293 *
1294 * Return: Zero if successful, or a negative error code on failure.
1295 */
1296int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1297			       unsigned int cond,
1298			       snd_pcm_hw_param_t var,
1299			       const struct snd_pcm_hw_constraint_list *l)
1300{
1301	return snd_pcm_hw_rule_add(runtime, cond, var,
1302				   snd_pcm_hw_rule_list, (void *)l,
1303				   var, -1);
1304}
1305EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
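/*
 * Typical driver-side usage, sketched with hypothetical names (foo_*); the
 * constraint list must stay valid for the whole lifetime of the runtime, so
 * it is usually static:
 *
 *	static const unsigned int foo_rates[] = { 32000, 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list foo_rate_constraints = {
 *		.count = ARRAY_SIZE(foo_rates),
 *		.list = foo_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *					 &foo_rate_constraints);
 */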
1306
1307static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1308				  struct snd_pcm_hw_rule *rule)
1309{
1310	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1311	return snd_interval_ranges(hw_param_interval(params, rule->var),
1312				   r->count, r->ranges, r->mask);
1313}
1314
1315
1316/**
1317 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1318 * @runtime: PCM runtime instance
1319 * @cond: condition bits
1320 * @var: hw_params variable to apply the list of range constraints
1321 * @r: ranges
1322 *
1323 * Apply the list of range constraints to an interval parameter.
1324 *
1325 * Return: Zero if successful, or a negative error code on failure.
1326 */
1327int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1328				 unsigned int cond,
1329				 snd_pcm_hw_param_t var,
1330				 const struct snd_pcm_hw_constraint_ranges *r)
1331{
1332	return snd_pcm_hw_rule_add(runtime, cond, var,
1333				   snd_pcm_hw_rule_ranges, (void *)r,
1334				   var, -1);
1335}
1336EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1337
1338static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1339				   struct snd_pcm_hw_rule *rule)
1340{
1341	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1342	unsigned int num = 0, den = 0;
1343	int err;
1344	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1345				  r->nrats, r->rats, &num, &den);
1346	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1347		params->rate_num = num;
1348		params->rate_den = den;
1349	}
1350	return err;
1351}
1352
1353/**
1354 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1355 * @runtime: PCM runtime instance
1356 * @cond: condition bits
1357 * @var: hw_params variable to apply the ratnums constraint
1358 * @r: struct snd_pcm_hw_constraint_ratnums constraints
1359 *
1360 * Return: Zero if successful, or a negative error code on failure.
1361 */
1362int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1363				  unsigned int cond,
1364				  snd_pcm_hw_param_t var,
1365				  const struct snd_pcm_hw_constraint_ratnums *r)
1366{
1367	return snd_pcm_hw_rule_add(runtime, cond, var,
1368				   snd_pcm_hw_rule_ratnums, (void *)r,
1369				   var, -1);
1370}
1371EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1372
1373static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1374				   struct snd_pcm_hw_rule *rule)
1375{
1376	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1377	unsigned int num = 0, den = 0;
1378	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1379				  r->nrats, r->rats, &num, &den);
1380	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1381		params->rate_num = num;
1382		params->rate_den = den;
1383	}
1384	return err;
1385}
1386
1387/**
1388 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1389 * @runtime: PCM runtime instance
1390 * @cond: condition bits
1391 * @var: hw_params variable to apply the ratdens constraint
1392 * @r: struct snd_pcm_hw_constraint_ratdens constraints
1393 *
1394 * Return: Zero if successful, or a negative error code on failure.
1395 */
1396int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1397				  unsigned int cond,
1398				  snd_pcm_hw_param_t var,
1399				  const struct snd_pcm_hw_constraint_ratdens *r)
1400{
1401	return snd_pcm_hw_rule_add(runtime, cond, var,
1402				   snd_pcm_hw_rule_ratdens, (void *)r,
1403				   var, -1);
1404}
1405EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1406
1407static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1408				  struct snd_pcm_hw_rule *rule)
1409{
1410	unsigned int l = (unsigned long) rule->private;
1411	int width = l & 0xffff;
1412	unsigned int msbits = l >> 16;
1413	const struct snd_interval *i =
1414		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1415
1416	if (!snd_interval_single(i))
1417		return 0;
1418
1419	if ((snd_interval_value(i) == width) ||
1420	    (width == 0 && snd_interval_value(i) > msbits))
1421		params->msbits = min_not_zero(params->msbits, msbits);
1422
1423	return 0;
1424}
1425
1426/**
1427 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1428 * @runtime: PCM runtime instance
1429 * @cond: condition bits
1430 * @width: sample bits width
1431 * @msbits: msbits width
1432 *
1433 * This constraint sets the number of most significant bits (msbits) if a
1434 * sample format with the specified width has been selected. If width is set
1435 * to 0, the msbits value is applied to any sample format with a width larger
1436 * than the specified msbits.
1437 *
1438 * Return: Zero if successful, or a negative error code on failure.
1439 */
1440int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1441				 unsigned int cond,
1442				 unsigned int width,
1443				 unsigned int msbits)
1444{
1445	unsigned long l = (msbits << 16) | width;
1446	return snd_pcm_hw_rule_add(runtime, cond, -1,
1447				    snd_pcm_hw_rule_msbits,
1448				    (void*) l,
1449				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1450}
1451EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
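/*
 * Typical driver-side usage: a converter that accepts 32-bit samples but only
 * resolves the 24 most significant bits can advertise that with
 *
 *	snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *
 * so that params->msbits is reported as 24 whenever a 32-bit format is chosen.
 */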
1452
1453static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1454				struct snd_pcm_hw_rule *rule)
1455{
1456	unsigned long step = (unsigned long) rule->private;
1457	return snd_interval_step(hw_param_interval(params, rule->var), step);
1458}
1459
1460/**
1461 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1462 * @runtime: PCM runtime instance
1463 * @cond: condition bits
1464 * @var: hw_params variable to apply the step constraint
1465 * @step: step size
1466 *
1467 * Return: Zero if successful, or a negative error code on failure.
1468 */
1469int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1470			       unsigned int cond,
1471			       snd_pcm_hw_param_t var,
1472			       unsigned long step)
1473{
1474	return snd_pcm_hw_rule_add(runtime, cond, var,
1475				   snd_pcm_hw_rule_step, (void *) step,
1476				   var, -1);
1477}
1478EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1479
1480static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1481{
1482	static const unsigned int pow2_sizes[] = {
1483		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1484		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1485		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1486		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1487	};
1488	return snd_interval_list(hw_param_interval(params, rule->var),
1489				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1490}
1491
1492/**
1493 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1494 * @runtime: PCM runtime instance
1495 * @cond: condition bits
1496 * @var: hw_params variable to apply the power-of-2 constraint
1497 *
1498 * Return: Zero if successful, or a negative error code on failure.
1499 */
1500int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1501			       unsigned int cond,
1502			       snd_pcm_hw_param_t var)
1503{
1504	return snd_pcm_hw_rule_add(runtime, cond, var,
1505				   snd_pcm_hw_rule_pow2, NULL,
1506				   var, -1);
1507}
1508EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1509
1510static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1511					   struct snd_pcm_hw_rule *rule)
1512{
1513	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1514	struct snd_interval *rate;
1515
1516	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1517	return snd_interval_list(rate, 1, &base_rate, 0);
1518}
1519
1520/**
1521 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1522 * @runtime: PCM runtime instance
1523 * @base_rate: the rate at which the hardware does not resample
1524 *
1525 * Return: Zero if successful, or a negative error code on failure.
1526 */
1527int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1528			       unsigned int base_rate)
1529{
1530	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1531				   SNDRV_PCM_HW_PARAM_RATE,
1532				   snd_pcm_hw_rule_noresample_func,
1533				   (void *)(uintptr_t)base_rate,
1534				   SNDRV_PCM_HW_PARAM_RATE, -1);
1535}
1536EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
1537
1538static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1539				  snd_pcm_hw_param_t var)
1540{
1541	if (hw_is_mask(var)) {
1542		snd_mask_any(hw_param_mask(params, var));
1543		params->cmask |= 1 << var;
1544		params->rmask |= 1 << var;
1545		return;
1546	}
1547	if (hw_is_interval(var)) {
1548		snd_interval_any(hw_param_interval(params, var));
1549		params->cmask |= 1 << var;
1550		params->rmask |= 1 << var;
1551		return;
1552	}
1553	snd_BUG();
1554}
1555
1556void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1557{
1558	unsigned int k;
1559	memset(params, 0, sizeof(*params));
1560	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1561		_snd_pcm_hw_param_any(params, k);
1562	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1563		_snd_pcm_hw_param_any(params, k);
1564	params->info = ~0U;
1565}
1566EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1567
1568/**
1569 * snd_pcm_hw_param_value - return @params field @var value
1570 * @params: the hw_params instance
1571 * @var: parameter to retrieve
1572 * @dir: pointer to the direction (-1,0,1) or %NULL
1573 *
1574 * Return: The value for field @var if it's fixed in configuration space
1575 * defined by @params. -%EINVAL otherwise.
1576 */
1577int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1578			   snd_pcm_hw_param_t var, int *dir)
1579{
1580	if (hw_is_mask(var)) {
1581		const struct snd_mask *mask = hw_param_mask_c(params, var);
1582		if (!snd_mask_single(mask))
1583			return -EINVAL;
1584		if (dir)
1585			*dir = 0;
1586		return snd_mask_value(mask);
1587	}
1588	if (hw_is_interval(var)) {
1589		const struct snd_interval *i = hw_param_interval_c(params, var);
1590		if (!snd_interval_single(i))
1591			return -EINVAL;
1592		if (dir)
1593			*dir = i->openmin;
1594		return snd_interval_value(i);
1595	}
1596	return -EINVAL;
1597}
1598EXPORT_SYMBOL(snd_pcm_hw_param_value);
1599
1600void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1601				snd_pcm_hw_param_t var)
1602{
1603	if (hw_is_mask(var)) {
1604		snd_mask_none(hw_param_mask(params, var));
1605		params->cmask |= 1 << var;
1606		params->rmask |= 1 << var;
1607	} else if (hw_is_interval(var)) {
1608		snd_interval_none(hw_param_interval(params, var));
1609		params->cmask |= 1 << var;
1610		params->rmask |= 1 << var;
1611	} else {
1612		snd_BUG();
1613	}
1614}
1615EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1616
1617static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1618				   snd_pcm_hw_param_t var)
1619{
1620	int changed;
1621	if (hw_is_mask(var))
1622		changed = snd_mask_refine_first(hw_param_mask(params, var));
1623	else if (hw_is_interval(var))
1624		changed = snd_interval_refine_first(hw_param_interval(params, var));
1625	else
1626		return -EINVAL;
1627	if (changed > 0) {
1628		params->cmask |= 1 << var;
1629		params->rmask |= 1 << var;
1630	}
1631	return changed;
1632}
1633
1634
1635/**
1636 * snd_pcm_hw_param_first - refine config space and return minimum value
1637 * @pcm: PCM instance
1638 * @params: the hw_params instance
1639 * @var: parameter to retrieve
1640 * @dir: pointer to the direction (-1,0,1) or %NULL
1641 *
1642 * Inside configuration space defined by @params remove from @var all
1643 * values > minimum. Reduce configuration space accordingly.
1644 *
1645 * Return: The minimum, or a negative error code on failure.
1646 */
1647int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1648			   struct snd_pcm_hw_params *params,
1649			   snd_pcm_hw_param_t var, int *dir)
1650{
1651	int changed = _snd_pcm_hw_param_first(params, var);
1652	if (changed < 0)
1653		return changed;
1654	if (params->rmask) {
1655		int err = snd_pcm_hw_refine(pcm, params);
1656		if (err < 0)
1657			return err;
1658	}
1659	return snd_pcm_hw_param_value(params, var, dir);
1660}
1661EXPORT_SYMBOL(snd_pcm_hw_param_first);
1662
1663static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1664				  snd_pcm_hw_param_t var)
1665{
1666	int changed;
1667	if (hw_is_mask(var))
1668		changed = snd_mask_refine_last(hw_param_mask(params, var));
1669	else if (hw_is_interval(var))
1670		changed = snd_interval_refine_last(hw_param_interval(params, var));
1671	else
1672		return -EINVAL;
1673	if (changed > 0) {
1674		params->cmask |= 1 << var;
1675		params->rmask |= 1 << var;
1676	}
1677	return changed;
1678}
1679
1680
1681/**
1682 * snd_pcm_hw_param_last - refine config space and return maximum value
1683 * @pcm: PCM instance
1684 * @params: the hw_params instance
1685 * @var: parameter to retrieve
1686 * @dir: pointer to the direction (-1,0,1) or %NULL
1687 *
1688 * Inside configuration space defined by @params remove from @var all
1689 * values < maximum. Reduce configuration space accordingly.
1690 *
1691 * Return: The maximum, or a negative error code on failure.
1692 */
1693int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1694			  struct snd_pcm_hw_params *params,
1695			  snd_pcm_hw_param_t var, int *dir)
1696{
1697	int changed = _snd_pcm_hw_param_last(params, var);
1698	if (changed < 0)
1699		return changed;
1700	if (params->rmask) {
1701		int err = snd_pcm_hw_refine(pcm, params);
1702		if (err < 0)
1703			return err;
1704	}
1705	return snd_pcm_hw_param_value(params, var, dir);
1706}
1707EXPORT_SYMBOL(snd_pcm_hw_param_last);
1708
1709static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1710				   void *arg)
1711{
1712	struct snd_pcm_runtime *runtime = substream->runtime;
1713	unsigned long flags;
1714	snd_pcm_stream_lock_irqsave(substream, flags);
1715	if (snd_pcm_running(substream) &&
1716	    snd_pcm_update_hw_ptr(substream) >= 0)
1717		runtime->status->hw_ptr %= runtime->buffer_size;
1718	else {
1719		runtime->status->hw_ptr = 0;
1720		runtime->hw_ptr_wrap = 0;
1721	}
1722	snd_pcm_stream_unlock_irqrestore(substream, flags);
1723	return 0;
1724}
1725
1726static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1727					  void *arg)
1728{
1729	struct snd_pcm_channel_info *info = arg;
1730	struct snd_pcm_runtime *runtime = substream->runtime;
1731	int width;
1732	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1733		info->offset = -1;
1734		return 0;
1735	}
1736	width = snd_pcm_format_physical_width(runtime->format);
1737	if (width < 0)
1738		return width;
1739	info->offset = 0;
1740	switch (runtime->access) {
1741	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1742	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1743		info->first = info->channel * width;
1744		info->step = runtime->channels * width;
1745		break;
1746	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1747	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1748	{
1749		size_t size = runtime->dma_bytes / runtime->channels;
1750		info->first = info->channel * size * 8;
1751		info->step = width;
1752		break;
1753	}
1754	default:
1755		snd_BUG();
1756		break;
1757	}
1758	return 0;
1759}
1760
1761static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1762				       void *arg)
1763{
1764	struct snd_pcm_hw_params *params = arg;
1765	snd_pcm_format_t format;
1766	int channels;
1767	ssize_t frame_size;
1768
1769	params->fifo_size = substream->runtime->hw.fifo_size;
1770	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1771		format = params_format(params);
1772		channels = params_channels(params);
1773		frame_size = snd_pcm_format_size(format, channels);
1774		if (frame_size > 0)
1775			params->fifo_size /= frame_size;
1776	}
1777	return 0;
1778}
1779
1780/**
1781 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1782 * @substream: the pcm substream instance
1783 * @cmd: ioctl command
1784 * @arg: ioctl argument
1785 *
1786 * Processes the generic ioctl commands for PCM.
1787 * Can be passed as the ioctl callback for PCM ops.
1788 *
1789 * Return: Zero if successful, or a negative error code on failure.
1790 */
1791int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1792		      unsigned int cmd, void *arg)
1793{
1794	switch (cmd) {
1795	case SNDRV_PCM_IOCTL1_RESET:
1796		return snd_pcm_lib_ioctl_reset(substream, arg);
1797	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1798		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1799	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1800		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1801	}
1802	return -ENXIO;
1803}
1804EXPORT_SYMBOL(snd_pcm_lib_ioctl);
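/*
 * Typical driver-side usage, sketched with hypothetical names (foo_*): a
 * driver that does not need to intercept these commands can point the .ioctl
 * field of its snd_pcm_ops at this helper (in recent kernels the field may
 * also be left NULL, in which case the core falls back to it):
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open		= foo_pcm_open,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.pointer	= foo_pcm_pointer,
 *	};
 */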
1805
1806/**
1807 * snd_pcm_period_elapsed_under_stream_lock() - update the runtime status for the next period
1808 *						with the PCM substream lock already held.
1809 * @substream: the PCM substream instance.
1810 *
1811 * This function is called when a batch of audio data frames, equal in size to one period of the
1812 * buffer, has been processed in the audio data transfer.
1813 *
1814 * The call updates the runtime status with the latest position of the audio data transfer, checks
1815 * for overrun and underrun of the buffer, wakes up user processes waiting for available audio data
1816 * frames, samples the audio timestamp, and stops or drains the PCM substream according to the
1817 * configured thresholds.
1818 *
1819 * The function is intended for the case that the PCM driver handles audio data frames with the
1820 * PCM substream lock already held; e.g. in a callback of any operation of &snd_pcm_ops in process
1821 * context. In interrupt context it's preferable to use ``snd_pcm_period_elapsed()`` instead, since
1822 * that acquires the PCM substream lock by itself.
1823 *
1824 * Note that some of the callbacks in &snd_pcm_ops can be invoked by this
1825 * call:
1826 *
1827 * - .pointer - to retrieve the current position of the audio data transfer as a frame count or an XRUN state.
1828 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1829 * - .get_time_info - to retrieve the audio timestamp if needed.
1830 *
1831 * Even if more than one period has elapsed since the last call, this needs to be called only once.
1832 */
1833void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1834{
1835	struct snd_pcm_runtime *runtime;
1836
1837	if (PCM_RUNTIME_CHECK(substream))
1838		return;
1839	runtime = substream->runtime;
1840
1841	if (!snd_pcm_running(substream) ||
1842	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1843		goto _end;
1844
1845#ifdef CONFIG_SND_PCM_TIMER
1846	if (substream->timer_running)
1847		snd_timer_interrupt(substream->timer, 1);
1848#endif
1849 _end:
1850	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1851}
1852EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
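/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * already holds the stream lock for its own bookkeeping can report the
 * elapsed period without dropping and re-taking the lock.  The helper
 * my_process_frames() is hypothetical and is assumed to return true when a
 * period boundary was crossed.
 *
 *	unsigned long flags;
 *
 *	snd_pcm_stream_lock_irqsave(substream, flags);
 *	if (my_process_frames(substream))
 *		snd_pcm_period_elapsed_under_stream_lock(substream);
 *	snd_pcm_stream_unlock_irqrestore(substream, flags);
 */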
1853
1854/**
1855 * snd_pcm_period_elapsed() - update the status of runtime for the next period, acquiring the
1856 *			      lock of the PCM substream by itself.
1857 * @substream: the instance of PCM substream.
1858 *
1859 * This function is the same as ``snd_pcm_period_elapsed_under_stream_lock()`` except that it
1860 * acquires the lock of the PCM substream by itself.
1861 *
1862 * It's typically called from an IRQ handler when a hardware interrupt signals that a batch of
1863 * audio data frames of the same size as the period of the buffer has been processed in the
1864 * audio data transmission.
1865 */
1866void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1867{
1868	unsigned long flags;
1869
1870	if (snd_BUG_ON(!substream))
1871		return;
1872
1873	snd_pcm_stream_lock_irqsave(substream, flags);
1874	snd_pcm_period_elapsed_under_stream_lock(substream);
1875	snd_pcm_stream_unlock_irqrestore(substream, flags);
1876}
1877EXPORT_SYMBOL(snd_pcm_period_elapsed);
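/*
 * Usage sketch (illustrative only, not part of this file): the typical call
 * site is a hardware interrupt handler, where no stream lock is held yet.
 * struct my_chip and my_chip_clear_irq() are hypothetical.
 *
 *	static irqreturn_t my_pcm_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_chip *chip = dev_id;
 *
 *		if (!my_chip_clear_irq(chip))
 *			return IRQ_NONE;
 *		snd_pcm_period_elapsed(chip->substream); // one period done
 *		return IRQ_HANDLED;
 *	}
 */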
1878
1879/*
1880 * Wait until avail_min data becomes available
1881 * Returns a negative error code if any error occurs during operation.
1882 * The available space is stored in availp.  When err = 0 and avail = 0
1883 * on a capture stream, it indicates that the stream is in the DRAINING state.
1884 */
1885static int wait_for_avail(struct snd_pcm_substream *substream,
1886			      snd_pcm_uframes_t *availp)
1887{
1888	struct snd_pcm_runtime *runtime = substream->runtime;
1889	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1890	wait_queue_entry_t wait;
1891	int err = 0;
1892	snd_pcm_uframes_t avail = 0;
1893	long wait_time, tout;
1894
1895	init_waitqueue_entry(&wait, current);
1896	set_current_state(TASK_INTERRUPTIBLE);
1897	add_wait_queue(&runtime->tsleep, &wait);
1898
1899	if (runtime->no_period_wakeup)
1900		wait_time = MAX_SCHEDULE_TIMEOUT;
1901	else {
1902		/* use wait time from substream if available */
1903		if (substream->wait_time) {
1904			wait_time = substream->wait_time;
1905		} else {
1906			wait_time = 100;
1907
1908			if (runtime->rate) {
1909				long t = runtime->buffer_size * 1100 / runtime->rate;
1910				wait_time = max(t, wait_time);
1911			}
1912		}
1913		wait_time = msecs_to_jiffies(wait_time);
1914	}
1915
1916	for (;;) {
1917		if (signal_pending(current)) {
1918			err = -ERESTARTSYS;
1919			break;
1920		}
1921
1922		/*
1923		 * We need to check first whether space has already become
1924		 * available (and thus the wakeup has already happened) to
1925		 * close the race against space becoming available concurrently.
1926		 * This check must happen after we have been added to the
1927		 * waitqueue and the current state is set to INTERRUPTIBLE.
1928		 */
1929		avail = snd_pcm_avail(substream);
1930		if (avail >= runtime->twake)
1931			break;
1932		snd_pcm_stream_unlock_irq(substream);
1933
1934		tout = schedule_timeout(wait_time);
1935
1936		snd_pcm_stream_lock_irq(substream);
1937		set_current_state(TASK_INTERRUPTIBLE);
1938		switch (runtime->state) {
1939		case SNDRV_PCM_STATE_SUSPENDED:
1940			err = -ESTRPIPE;
1941			goto _endloop;
1942		case SNDRV_PCM_STATE_XRUN:
1943			err = -EPIPE;
1944			goto _endloop;
1945		case SNDRV_PCM_STATE_DRAINING:
1946			if (is_playback)
1947				err = -EPIPE;
1948			else
1949				avail = 0; /* indicate draining */
1950			goto _endloop;
1951		case SNDRV_PCM_STATE_OPEN:
1952		case SNDRV_PCM_STATE_SETUP:
1953		case SNDRV_PCM_STATE_DISCONNECTED:
1954			err = -EBADFD;
1955			goto _endloop;
1956		case SNDRV_PCM_STATE_PAUSED:
1957			continue;
1958		}
1959		if (!tout) {
1960			pcm_dbg(substream->pcm,
1961				"%s timeout (DMA or IRQ trouble?)\n",
1962				is_playback ? "playback write" : "capture read");
1963			err = -EIO;
1964			break;
1965		}
1966	}
1967 _endloop:
1968	set_current_state(TASK_RUNNING);
1969	remove_wait_queue(&runtime->tsleep, &wait);
1970	*availp = avail;
1971	return err;
1972}
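/*
 * Worked example of the timeout heuristic above (illustrative only): with
 * no_period_wakeup unset and no substream->wait_time, the timeout is
 * max(100 ms, buffer time + 10%).  A 16384-frame buffer at 48000 Hz gives
 * 16384 * 1100 / 48000 = 375 ms, so wait_time becomes 375 ms (converted to
 * jiffies), while a small 1024-frame buffer would fall back to the 100 ms
 * floor.
 */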
1973
1974typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1975			      int channel, unsigned long hwoff,
1976			      struct iov_iter *iter, unsigned long bytes);
1977
1978typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1979			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
1980			  bool);
1981
1982/* calculate the target DMA-buffer position to be written/read */
1983static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1984			   int channel, unsigned long hwoff)
1985{
1986	return runtime->dma_area + hwoff +
1987		channel * (runtime->dma_bytes / runtime->channels);
1988}
1989
1990/* default copy ops for write; used for both interleaved and non-interleaved modes */
1991static int default_write_copy(struct snd_pcm_substream *substream,
1992			      int channel, unsigned long hwoff,
1993			      struct iov_iter *iter, unsigned long bytes)
1994{
1995	if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
1996			   bytes, iter) != bytes)
1997		return -EFAULT;
1998	return 0;
1999}
2000
2001/* fill silence instead of copy data; called as a transfer helper
2002 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
2003 * a NULL buffer is passed
2004 */
2005static int fill_silence(struct snd_pcm_substream *substream, int channel,
2006			unsigned long hwoff, struct iov_iter *iter,
2007			unsigned long bytes)
2008{
2009	struct snd_pcm_runtime *runtime = substream->runtime;
2010
2011	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2012		return 0;
2013	if (substream->ops->fill_silence)
2014		return substream->ops->fill_silence(substream, channel,
2015						    hwoff, bytes);
2016
2017	snd_pcm_format_set_silence(runtime->format,
2018				   get_dma_ptr(runtime, channel, hwoff),
2019				   bytes_to_samples(runtime, bytes));
2020	return 0;
2021}
2022
2023/* default copy ops for read; used for both interleaved and non-interleaved modes */
2024static int default_read_copy(struct snd_pcm_substream *substream,
2025			     int channel, unsigned long hwoff,
2026			     struct iov_iter *iter, unsigned long bytes)
2027{
2028	if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2029			 bytes, iter) != bytes)
2030		return -EFAULT;
2031	return 0;
2032}
2033
2034/* call transfer with the filled iov_iter */
2035static int do_transfer(struct snd_pcm_substream *substream, int c,
2036		       unsigned long hwoff, void *data, unsigned long bytes,
2037		       pcm_transfer_f transfer, bool in_kernel)
2038{
2039	struct iov_iter iter;
2040	int err, type;
2041
2042	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2043		type = ITER_SOURCE;
2044	else
2045		type = ITER_DEST;
2046
2047	if (in_kernel) {
2048		struct kvec kvec = { data, bytes };
2049
2050		iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2051		return transfer(substream, c, hwoff, &iter, bytes);
2052	}
2053
2054	err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2055	if (err)
2056		return err;
2057	return transfer(substream, c, hwoff, &iter, bytes);
2058}
2059
2060/* call transfer function with the converted pointers and sizes;
2061 * for interleaved mode, it's one shot for all samples
2062 */
2063static int interleaved_copy(struct snd_pcm_substream *substream,
2064			    snd_pcm_uframes_t hwoff, void *data,
2065			    snd_pcm_uframes_t off,
2066			    snd_pcm_uframes_t frames,
2067			    pcm_transfer_f transfer,
2068			    bool in_kernel)
2069{
2070	struct snd_pcm_runtime *runtime = substream->runtime;
2071
2072	/* convert to bytes */
2073	hwoff = frames_to_bytes(runtime, hwoff);
2074	off = frames_to_bytes(runtime, off);
2075	frames = frames_to_bytes(runtime, frames);
2076
2077	return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2078			   in_kernel);
2079}
2080
2081/* call transfer function with the converted pointers and sizes for each
2082 * non-interleaved channel; when the buffer is NULL, fill silence instead of copying
2083 */
2084static int noninterleaved_copy(struct snd_pcm_substream *substream,
2085			       snd_pcm_uframes_t hwoff, void *data,
2086			       snd_pcm_uframes_t off,
2087			       snd_pcm_uframes_t frames,
2088			       pcm_transfer_f transfer,
2089			       bool in_kernel)
2090{
2091	struct snd_pcm_runtime *runtime = substream->runtime;
2092	int channels = runtime->channels;
2093	void **bufs = data;
2094	int c, err;
2095
2096	/* convert to bytes; note that it's not frames_to_bytes() here.
2097	 * in non-interleaved mode, we copy per channel, so each copy
2098	 * covers n_samples samples; over all channels this adds up to whole frames.
2099	 */
2100	off = samples_to_bytes(runtime, off);
2101	frames = samples_to_bytes(runtime, frames);
2102	hwoff = samples_to_bytes(runtime, hwoff);
2103	for (c = 0; c < channels; ++c, ++bufs) {
2104		if (!data || !*bufs)
2105			err = fill_silence(substream, c, hwoff, NULL, frames);
2106		else
2107			err = do_transfer(substream, c, hwoff, *bufs + off,
2108					  frames, transfer, in_kernel);
2109		if (err < 0)
2110			return err;
2111	}
2112	return 0;
2113}
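/*
 * Worked example of the samples_to_bytes() accounting above (illustrative
 * only): for S16_LE with 2 channels and frames = 256, each per-channel copy
 * transfers 256 samples * 2 bytes = 512 bytes; the two copies together move
 * 1024 bytes, i.e. 256 whole frames of 4 bytes each.
 */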
2114
2115/* fill silence on the given buffer position;
2116 * called from snd_pcm_playback_silence()
2117 */
2118static int fill_silence_frames(struct snd_pcm_substream *substream,
2119			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2120{
2121	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2122	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2123		return interleaved_copy(substream, off, NULL, 0, frames,
2124					fill_silence, true);
2125	else
2126		return noninterleaved_copy(substream, off, NULL, 0, frames,
2127					   fill_silence, true);
2128}
2129
2130/* sanity-check for read/write methods */
2131static int pcm_sanity_check(struct snd_pcm_substream *substream)
2132{
2133	struct snd_pcm_runtime *runtime;
2134	if (PCM_RUNTIME_CHECK(substream))
2135		return -ENXIO;
2136	runtime = substream->runtime;
2137	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2138		return -EINVAL;
2139	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2140		return -EBADFD;
2141	return 0;
2142}
2143
2144static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2145{
2146	switch (runtime->state) {
2147	case SNDRV_PCM_STATE_PREPARED:
2148	case SNDRV_PCM_STATE_RUNNING:
2149	case SNDRV_PCM_STATE_PAUSED:
2150		return 0;
2151	case SNDRV_PCM_STATE_XRUN:
2152		return -EPIPE;
2153	case SNDRV_PCM_STATE_SUSPENDED:
2154		return -ESTRPIPE;
2155	default:
2156		return -EBADFD;
2157	}
2158}
2159
2160/* update to the given appl_ptr and call the ack callback if needed;
2161 * when an error is returned, roll back to the original value
2162 */
2163int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2164			   snd_pcm_uframes_t appl_ptr)
2165{
2166	struct snd_pcm_runtime *runtime = substream->runtime;
2167	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2168	snd_pcm_sframes_t diff;
2169	int ret;
2170
2171	if (old_appl_ptr == appl_ptr)
2172		return 0;
2173
2174	if (appl_ptr >= runtime->boundary)
2175		return -EINVAL;
2176	/*
2177	 * check if a rewind is requested by the application
2178	 */
2179	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2180		diff = appl_ptr - old_appl_ptr;
2181		if (diff >= 0) {
2182			if (diff > runtime->buffer_size)
2183				return -EINVAL;
2184		} else {
2185			if (runtime->boundary + diff > runtime->buffer_size)
2186				return -EINVAL;
2187		}
2188	}
2189
2190	runtime->control->appl_ptr = appl_ptr;
2191	if (substream->ops->ack) {
2192		ret = substream->ops->ack(substream);
2193		if (ret < 0) {
2194			runtime->control->appl_ptr = old_appl_ptr;
2195			if (ret == -EPIPE)
2196				__snd_pcm_xrun(substream);
2197			return ret;
2198		}
2199	}
2200
2201	trace_applptr(substream, old_appl_ptr, appl_ptr);
2202
2203	return 0;
2204}
2205
2206/* the common loop for read/write data */
2207snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2208				     void *data, bool interleaved,
2209				     snd_pcm_uframes_t size, bool in_kernel)
2210{
2211	struct snd_pcm_runtime *runtime = substream->runtime;
2212	snd_pcm_uframes_t xfer = 0;
2213	snd_pcm_uframes_t offset = 0;
2214	snd_pcm_uframes_t avail;
2215	pcm_copy_f writer;
2216	pcm_transfer_f transfer;
2217	bool nonblock;
2218	bool is_playback;
2219	int err;
2220
2221	err = pcm_sanity_check(substream);
2222	if (err < 0)
2223		return err;
2224
2225	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2226	if (interleaved) {
2227		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2228		    runtime->channels > 1)
2229			return -EINVAL;
2230		writer = interleaved_copy;
2231	} else {
2232		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2233			return -EINVAL;
2234		writer = noninterleaved_copy;
2235	}
2236
2237	if (!data) {
2238		if (is_playback)
2239			transfer = fill_silence;
2240		else
2241			return -EINVAL;
2242	} else {
2243		if (substream->ops->copy)
2244			transfer = substream->ops->copy;
2245		else
2246			transfer = is_playback ?
2247				default_write_copy : default_read_copy;
2248	}
2249
2250	if (size == 0)
2251		return 0;
2252
2253	nonblock = !!(substream->f_flags & O_NONBLOCK);
2254
2255	snd_pcm_stream_lock_irq(substream);
2256	err = pcm_accessible_state(runtime);
2257	if (err < 0)
2258		goto _end_unlock;
2259
2260	runtime->twake = runtime->control->avail_min ? : 1;
2261	if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2262		snd_pcm_update_hw_ptr(substream);
2263
2264	/*
2265	 * If size < start_threshold, don't start the stream here but
2266	 * wait indefinitely; another thread may start the capture.
2267	 */
2268	if (!is_playback &&
2269	    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2270	    size >= runtime->start_threshold) {
2271		err = snd_pcm_start(substream);
2272		if (err < 0)
2273			goto _end_unlock;
2274	}
2275
2276	avail = snd_pcm_avail(substream);
2277
2278	while (size > 0) {
2279		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2280		snd_pcm_uframes_t cont;
2281		if (!avail) {
2282			if (!is_playback &&
2283			    runtime->state == SNDRV_PCM_STATE_DRAINING) {
2284				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2285				goto _end_unlock;
2286			}
2287			if (nonblock) {
2288				err = -EAGAIN;
2289				goto _end_unlock;
2290			}
2291			runtime->twake = min_t(snd_pcm_uframes_t, size,
2292					runtime->control->avail_min ? : 1);
2293			err = wait_for_avail(substream, &avail);
2294			if (err < 0)
2295				goto _end_unlock;
2296			if (!avail)
2297				continue; /* draining */
2298		}
2299		frames = size > avail ? avail : size;
2300		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2301		appl_ofs = appl_ptr % runtime->buffer_size;
2302		cont = runtime->buffer_size - appl_ofs;
2303		if (frames > cont)
2304			frames = cont;
2305		if (snd_BUG_ON(!frames)) {
2306			err = -EINVAL;
2307			goto _end_unlock;
2308		}
2309		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2310			err = -EBUSY;
2311			goto _end_unlock;
2312		}
2313		snd_pcm_stream_unlock_irq(substream);
2314		if (!is_playback)
2315			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2316		err = writer(substream, appl_ofs, data, offset, frames,
2317			     transfer, in_kernel);
2318		if (is_playback)
2319			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2320		snd_pcm_stream_lock_irq(substream);
2321		atomic_dec(&runtime->buffer_accessing);
2322		if (err < 0)
2323			goto _end_unlock;
2324		err = pcm_accessible_state(runtime);
2325		if (err < 0)
2326			goto _end_unlock;
2327		appl_ptr += frames;
2328		if (appl_ptr >= runtime->boundary)
2329			appl_ptr -= runtime->boundary;
2330		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2331		if (err < 0)
2332			goto _end_unlock;
2333
2334		offset += frames;
2335		size -= frames;
2336		xfer += frames;
2337		avail -= frames;
2338		if (is_playback &&
2339		    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2340		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2341			err = snd_pcm_start(substream);
2342			if (err < 0)
2343				goto _end_unlock;
2344		}
2345	}
2346 _end_unlock:
2347	runtime->twake = 0;
2348	if (xfer > 0 && err >= 0)
2349		snd_pcm_update_state(substream, runtime);
2350	snd_pcm_stream_unlock_irq(substream);
2351	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2352}
2353EXPORT_SYMBOL(__snd_pcm_lib_xfer);
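/*
 * Parameter sketch (illustrative only, not part of this file): a kernel-space
 * caller pushing interleaved frames from a local buffer would roughly do
 *
 *	snd_pcm_sframes_t n;
 *
 *	n = __snd_pcm_lib_xfer(substream, buf, true, frames, true);
 *	if (n < 0)
 *		pr_err("PCM transfer failed: %ld\n", n);
 *
 * where buf is the kernel data pointer, the first bool selects interleaved
 * access, frames is the transfer size in frames, and the last bool marks the
 * buffer as kernel memory (so a kvec iterator is used instead of
 * import_ubuf()).  Kernel-side users normally reach this through the inline
 * wrappers in <sound/pcm.h> (e.g. snd_pcm_kernel_write()) rather than calling
 * it directly.
 */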
2354
2355/*
2356 * standard channel mapping helpers
2357 */
2358
2359/* default channel maps for multi-channel playbacks, up to 8 channels */
2360const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2361	{ .channels = 1,
2362	  .map = { SNDRV_CHMAP_MONO } },
2363	{ .channels = 2,
2364	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2365	{ .channels = 4,
2366	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2367		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2368	{ .channels = 6,
2369	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2370		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2371		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2372	{ .channels = 8,
2373	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2374		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2375		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2376		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2377	{ }
2378};
2379EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2380
2381/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2382const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2383	{ .channels = 1,
2384	  .map = { SNDRV_CHMAP_MONO } },
2385	{ .channels = 2,
2386	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2387	{ .channels = 4,
2388	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2389		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2390	{ .channels = 6,
2391	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2392		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2393		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2394	{ .channels = 8,
2395	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2396		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2397		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2398		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2399	{ }
2400};
2401EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2402
2403static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2404{
2405	if (ch > info->max_channels)
2406		return false;
2407	return !info->channel_mask || (info->channel_mask & (1U << ch));
2408}
2409
2410static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2411			      struct snd_ctl_elem_info *uinfo)
2412{
2413	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2414
2415	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2416	uinfo->count = info->max_channels;
2417	uinfo->value.integer.min = 0;
2418	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2419	return 0;
2420}
2421
2422/* get callback for channel map ctl element
2423 * stores the channel positions of the first map matching the current channel count
2424 */
2425static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2426			     struct snd_ctl_elem_value *ucontrol)
2427{
2428	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2429	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2430	struct snd_pcm_substream *substream;
2431	const struct snd_pcm_chmap_elem *map;
2432
2433	if (!info->chmap)
2434		return -EINVAL;
2435	substream = snd_pcm_chmap_substream(info, idx);
2436	if (!substream)
2437		return -ENODEV;
2438	memset(ucontrol->value.integer.value, 0,
2439	       sizeof(long) * info->max_channels);
2440	if (!substream->runtime)
2441		return 0; /* no channels set */
2442	for (map = info->chmap; map->channels; map++) {
2443		int i;
2444		if (map->channels == substream->runtime->channels &&
2445		    valid_chmap_channels(info, map->channels)) {
2446			for (i = 0; i < map->channels; i++)
2447				ucontrol->value.integer.value[i] = map->map[i];
2448			return 0;
2449		}
2450	}
2451	return -EINVAL;
2452}
2453
2454/* tlv callback for channel map ctl element
2455 * expands the pre-defined channel maps in the form of a TLV container
2456 */
2457static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2458			     unsigned int size, unsigned int __user *tlv)
2459{
2460	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2461	const struct snd_pcm_chmap_elem *map;
2462	unsigned int __user *dst;
2463	int c, count = 0;
2464
2465	if (!info->chmap)
2466		return -EINVAL;
2467	if (size < 8)
2468		return -ENOMEM;
2469	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2470		return -EFAULT;
2471	size -= 8;
2472	dst = tlv + 2;
2473	for (map = info->chmap; map->channels; map++) {
2474		int chs_bytes = map->channels * 4;
2475		if (!valid_chmap_channels(info, map->channels))
2476			continue;
2477		if (size < 8)
2478			return -ENOMEM;
2479		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2480		    put_user(chs_bytes, dst + 1))
2481			return -EFAULT;
2482		dst += 2;
2483		size -= 8;
2484		count += 8;
2485		if (size < chs_bytes)
2486			return -ENOMEM;
2487		size -= chs_bytes;
2488		count += chs_bytes;
2489		for (c = 0; c < map->channels; c++) {
2490			if (put_user(map->map[c], dst))
2491				return -EFAULT;
2492			dst++;
2493		}
2494	}
2495	if (put_user(count, tlv + 1))
2496		return -EFAULT;
2497	return 0;
2498}
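/*
 * Worked example of the TLV expansion above (illustrative only): for an info
 * with max_channels = 2, channel_mask = 0 and the standard snd_pcm_std_chmaps
 * table above, the 32-bit words returned to user space would be
 *
 *	[0] SNDRV_CTL_TLVT_CONTAINER
 *	[1] 28				total payload bytes (count)
 *	[2] SNDRV_CTL_TLVT_CHMAP_FIXED
 *	[3] 4				chs_bytes of the mono map
 *	[4] SNDRV_CHMAP_MONO
 *	[5] SNDRV_CTL_TLVT_CHMAP_FIXED
 *	[6] 8				chs_bytes of the stereo map
 *	[7] SNDRV_CHMAP_FL
 *	[8] SNDRV_CHMAP_FR
 *
 * i.e. one CHMAP_FIXED sub-TLV per map entry passing valid_chmap_channels();
 * maps with more channels than max_channels are skipped.
 */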
2499
2500static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2501{
2502	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2503	info->pcm->streams[info->stream].chmap_kctl = NULL;
2504	kfree(info);
2505}
2506
2507/**
2508 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2509 * @pcm: the assigned PCM instance
2510 * @stream: stream direction
2511 * @chmap: channel map elements (for query)
2512 * @max_channels: the max number of channels for the stream
2513 * @private_value: the value passed to each kcontrol's private_value field
2514 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2515 *
2516 * Create channel-mapping control elements assigned to the given PCM stream(s).
2517 * Return: Zero if successful, or a negative error value.
2518 */
2519int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2520			   const struct snd_pcm_chmap_elem *chmap,
2521			   int max_channels,
2522			   unsigned long private_value,
2523			   struct snd_pcm_chmap **info_ret)
2524{
2525	struct snd_pcm_chmap *info;
2526	struct snd_kcontrol_new knew = {
2527		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2528		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2529			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2530			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2531		.info = pcm_chmap_ctl_info,
2532		.get = pcm_chmap_ctl_get,
2533		.tlv.c = pcm_chmap_ctl_tlv,
2534	};
2535	int err;
2536
2537	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2538		return -EBUSY;
2539	info = kzalloc(sizeof(*info), GFP_KERNEL);
2540	if (!info)
2541		return -ENOMEM;
2542	info->pcm = pcm;
2543	info->stream = stream;
2544	info->chmap = chmap;
2545	info->max_channels = max_channels;
2546	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2547		knew.name = "Playback Channel Map";
2548	else
2549		knew.name = "Capture Channel Map";
2550	knew.device = pcm->device;
2551	knew.count = pcm->streams[stream].substream_count;
2552	knew.private_value = private_value;
2553	info->kctl = snd_ctl_new1(&knew, info);
2554	if (!info->kctl) {
2555		kfree(info);
2556		return -ENOMEM;
2557	}
2558	info->kctl->private_free = pcm_chmap_ctl_private_free;
2559	err = snd_ctl_add(pcm->card, info->kctl);
2560	if (err < 0)
2561		return err;
2562	pcm->streams[stream].chmap_kctl = info->kctl;
2563	if (info_ret)
2564		*info_ret = info;
2565	return 0;
2566}
2567EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
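/*
 * Usage sketch (illustrative only, not part of this file): a driver with a
 * stereo playback stream could register the standard maps right after
 * creating the PCM device; "chip" and the error label are hypothetical.
 *
 *	err = snd_pcm_add_chmap_ctls(chip->pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 2, 0, NULL);
 *	if (err < 0)
 *		goto error;
 */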
2568