// SPDX-License-Identifier: GPL-2.0-only
/*
 *  compress_core.c - compress offload core
 *
 *  Copyright (C) 2011 Intel Corporation
 *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
 *		Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
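
/*
 * With the pr_fmt() above, messages from this file get the module name plus
 * the calling function and line number as a prefix, e.g. (illustrative
 * output only, assuming the module is built as snd-compress):
 *
 *	snd_compress: snd_compr_open: <line>: no device data!!!
 */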

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/info.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>

/* struct snd_compr_codec_caps overflows the ioctl bit size for some
 * architectures, so we need to disable the relevant ioctls.
 */
#if _IOC_SIZEBITS < 14
#define COMPR_CODEC_CAPS_OVERFLOW
#endif
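
/*
 * Note (deduced from the check above): an ioctl number encodes the argument
 * size in _IOC_SIZEBITS bits, and sizeof(struct snd_compr_codec_caps) needs
 * at least 14 of them, so SNDRV_COMPRESS_GET_CODEC_CAPS cannot be encoded on
 * architectures where _IOC_SIZEBITS < 14 (see the #ifndef blocks below).
 */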

/* TODO:
 * - add substream support for multiple devices in case
 *	SND_DYNAMIC_MINORS is not used
 * - Multiple node representation
 *	a driver should be able to register multiple nodes
 */

struct snd_compr_file {
	unsigned long caps;
	struct snd_compr_stream stream;
};

static void error_delayed_work(struct work_struct *work);

/*
 * a note on stream states used:
 * we use the following states in the compressed core
 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
 *	state at stop by calling SNDRV_COMPRESS_STOP, or at the end of drain.
 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
 *	playback only). After setting up the stream, the user writes data to
 *	the buffer before starting the stream.
 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
 *	decoding/encoding and rendering/capturing data.
 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
 *	by calling SNDRV_COMPRESS_DRAIN.
 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
 */
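
/*
 * For reference, a typical playback session from userspace maps onto these
 * states roughly as follows (illustrative only; see
 * Documentation/sound/designs/compress-offload.rst for the full description):
 *
 *	fd = open("/dev/snd/comprCxDy", O_WRONLY);		OPEN
 *	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);		SETUP
 *	write(fd, data, size);					PREPARED
 *	ioctl(fd, SNDRV_COMPRESS_START);			RUNNING
 *	... write()/SNDRV_COMPRESS_AVAIL/TSTAMP loop ...
 *	ioctl(fd, SNDRV_COMPRESS_DRAIN);			DRAINING, then SETUP
 *	ioctl(fd, SNDRV_COMPRESS_STOP);				SETUP
 */
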
static int snd_compr_open(struct inode *inode, struct file *f)
{
	struct snd_compr *compr;
	struct snd_compr_file *data;
	struct snd_compr_runtime *runtime;
	enum snd_compr_direction dirn;
	int maj = imajor(inode);
	int ret;

	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
		dirn = SND_COMPRESS_PLAYBACK;
	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
		dirn = SND_COMPRESS_CAPTURE;
	else
		return -EINVAL;

	if (maj == snd_major)
		compr = snd_lookup_minor_data(iminor(inode),
					SNDRV_DEVICE_TYPE_COMPRESS);
	else
		return -EBADFD;

	if (compr == NULL) {
		pr_err("no device data!!!\n");
		return -ENODEV;
	}

	if (dirn != compr->direction) {
		pr_err("this device doesn't support this direction\n");
		snd_card_unref(compr->card);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		snd_card_unref(compr->card);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);

	data->stream.ops = compr->ops;
	data->stream.direction = dirn;
	data->stream.private_data = compr->private_data;
	data->stream.device = compr;
	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (!runtime) {
		kfree(data);
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	runtime->state = SNDRV_PCM_STATE_OPEN;
	init_waitqueue_head(&runtime->sleep);
	data->stream.runtime = runtime;
	f->private_data = (void *)data;
	mutex_lock(&compr->lock);
	ret = compr->ops->open(&data->stream);
	mutex_unlock(&compr->lock);
	if (ret) {
		kfree(runtime);
		kfree(data);
	}
	snd_card_unref(compr->card);
	return ret;
}

static int snd_compr_free(struct inode *inode, struct file *f)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_runtime *runtime = data->stream.runtime;

	cancel_delayed_work_sync(&data->stream.error_work);

	switch (runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_DRAINING:
	case SNDRV_PCM_STATE_PAUSED:
		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
		break;
	default:
		break;
	}

	data->stream.ops->free(&data->stream);
	if (!data->stream.runtime->dma_buffer_p)
		kfree(data->stream.runtime->buffer);
	kfree(data->stream.runtime);
	kfree(data);
	return 0;
}

static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
		struct snd_compr_tstamp *tstamp)
{
	if (!stream->ops->pointer)
		return -ENOTSUPP;
	stream->ops->pointer(stream, tstamp);
	pr_debug("dsp consumed till %d total %d bytes\n",
		tstamp->byte_offset, tstamp->copied_total);
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		stream->runtime->total_bytes_transferred = tstamp->copied_total;
	else
		stream->runtime->total_bytes_available = tstamp->copied_total;
	return 0;
}

static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
		struct snd_compr_avail *avail)
{
	memset(avail, 0, sizeof(*avail));
	snd_compr_update_tstamp(stream, &avail->tstamp);
	/* Still need to return avail even if tstamp can't be filled in */

	if (stream->runtime->total_bytes_available == 0 &&
			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
			stream->direction == SND_COMPRESS_PLAYBACK) {
		pr_debug("detected init and someone forgot to do a write\n");
		return stream->runtime->buffer_size;
	}
	pr_debug("app wrote %lld, DSP consumed %lld\n",
			stream->runtime->total_bytes_available,
			stream->runtime->total_bytes_transferred);
	if (stream->runtime->total_bytes_available ==
				stream->runtime->total_bytes_transferred) {
		if (stream->direction == SND_COMPRESS_PLAYBACK) {
			pr_debug("both pointers are same, returning full avail\n");
			return stream->runtime->buffer_size;
		} else {
			pr_debug("both pointers are same, returning no avail\n");
			return 0;
		}
	}

	avail->avail = stream->runtime->total_bytes_available -
			stream->runtime->total_bytes_transferred;
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		avail->avail = stream->runtime->buffer_size - avail->avail;
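	/*
	 * Worked example (playback, numbers purely illustrative): with
	 * buffer_size = 8192, total_bytes_available = 10000 and
	 * total_bytes_transferred = 6000, the app still has 4000 bytes
	 * pending in the ring buffer, so avail ends up as 8192 - 4000 = 4192
	 * writable bytes.  For capture, avail stays the pending byte count
	 * itself.
	 */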

	pr_debug("ret avail as %lld\n", avail->avail);
	return avail->avail;
}

static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
{
	struct snd_compr_avail avail;

	return snd_compr_calc_avail(stream, &avail);
}

static int
snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_avail ioctl_avail;
	size_t avail;

	avail = snd_compr_calc_avail(stream, &ioctl_avail);
	ioctl_avail.avail = avail;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
		return -EBADFD;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	if (copy_to_user((__u64 __user *)arg,
				&ioctl_avail, sizeof(ioctl_avail)))
		return -EFAULT;
	return 0;
}

static int snd_compr_write_data(struct snd_compr_stream *stream,
	       const char __user *buf, size_t count)
{
	void *dstn;
	size_t copy;
	struct snd_compr_runtime *runtime = stream->runtime;
	/* 64-bit Modulus */
	u64 app_pointer = div64_u64(runtime->total_bytes_available,
				    runtime->buffer_size);
	app_pointer = runtime->total_bytes_available -
		      (app_pointer * runtime->buffer_size);
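	/*
	 * app_pointer now equals total_bytes_available % buffer_size, i.e.
	 * the write offset inside the ring buffer.  Illustrative example:
	 * with buffer_size = 4096 and total_bytes_available = 5120, the two
	 * statements above give app_pointer = 5120 - (1 * 4096) = 1024.
	 */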

	dstn = runtime->buffer + app_pointer;
	pr_debug("copying %ld at %lld\n",
			(unsigned long)count, app_pointer);
	if (count < runtime->buffer_size - app_pointer) {
		if (copy_from_user(dstn, buf, count))
			return -EFAULT;
	} else {
		copy = runtime->buffer_size - app_pointer;
		if (copy_from_user(dstn, buf, copy))
			return -EFAULT;
		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
			return -EFAULT;
	}
	/* if DSP cares, let it know data has been written */
	if (stream->ops->ack)
		stream->ops->ack(stream, count);
	return count;
}

static ssize_t snd_compr_write(struct file *f, const char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	mutex_lock(&stream->device->lock);
	/* write is allowed when stream is running or has been set up */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_RUNNING:
		break;
	default:
		mutex_unlock(&stream->device->lock);
		return -EBADFD;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %ld\n", (unsigned long)avail);
	/* calculate how much we can write to buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		char __user *cbuf = (char __user *)buf;

		retval = stream->ops->copy(stream, cbuf, avail);
	} else {
		retval = snd_compr_write_data(stream, buf, avail);
	}
	if (retval > 0)
		stream->runtime->total_bytes_available += retval;

	/*
	 * while initiating the stream, write should be called before the
	 * START call, so move a stream still in the setup state to prepared
	 */
	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
		pr_debug("stream prepared, Houston we are good to go\n");
	}

	mutex_unlock(&stream->device->lock);
	return retval;
}


static ssize_t snd_compr_read(struct file *f, char __user *buf,
		size_t count, loff_t *offset)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	int retval;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;
	mutex_lock(&stream->device->lock);

	/* read is allowed when stream is running, paused, draining and setup
	 * (setup is the state we transition to after stop, so if the user
	 * wants to read data after stop we allow that)
	 */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_SUSPENDED:
	case SNDRV_PCM_STATE_DISCONNECTED:
		retval = -EBADFD;
		goto out;
	case SNDRV_PCM_STATE_XRUN:
		retval = -EPIPE;
		goto out;
	}

	avail = snd_compr_get_avail(stream);
	pr_debug("avail returned %ld\n", (unsigned long)avail);
	/* calculate how much we can read from buffer */
	if (avail > count)
		avail = count;

	if (stream->ops->copy) {
		retval = stream->ops->copy(stream, buf, avail);
	} else {
		retval = -ENXIO;
		goto out;
	}
	if (retval > 0)
		stream->runtime->total_bytes_transferred += retval;

out:
	mutex_unlock(&stream->device->lock);
	return retval;
}

static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
{
	return -ENXIO;
}

static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
{
	if (stream->direction == SND_COMPRESS_PLAYBACK)
		return EPOLLOUT | EPOLLWRNORM;
	else
		return EPOLLIN | EPOLLRDNORM;
}

static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	size_t avail;
	__poll_t retval = 0;

	if (snd_BUG_ON(!data))
		return EPOLLERR;

	stream = &data->stream;

	mutex_lock(&stream->device->lock);

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_XRUN:
		retval = snd_compr_get_poll(stream) | EPOLLERR;
		goto out;
	default:
		break;
	}

	poll_wait(f, &stream->runtime->sleep, wait);

	avail = snd_compr_get_avail(stream);
	pr_debug("avail is %ld\n", (unsigned long)avail);
	/* check if we have at least one fragment to fill */
	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_DRAINING:
		/* stream has been woken up after drain is complete
		 * draining done so set stream state to stopped
		 */
		retval = snd_compr_get_poll(stream);
		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
		break;
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		if (avail >= stream->runtime->fragment_size)
			retval = snd_compr_get_poll(stream);
		break;
	default:
		retval = snd_compr_get_poll(stream) | EPOLLERR;
		break;
	}
out:
	mutex_unlock(&stream->device->lock);
	return retval;
}

static int
snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_caps caps;

	if (!stream->ops->get_caps)
		return -ENXIO;

	memset(&caps, 0, sizeof(caps));
	retval = stream->ops->get_caps(stream, &caps);
	if (retval)
		goto out;
	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
		retval = -EFAULT;
out:
	return retval;
}

#ifndef COMPR_CODEC_CAPS_OVERFLOW
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
	int retval;
	struct snd_compr_codec_caps *caps;

	if (!stream->ops->get_codec_caps)
		return -ENXIO;

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	retval = stream->ops->get_codec_caps(stream, caps);
	if (retval)
		goto out;
	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
		retval = -EFAULT;

out:
	kfree(caps);
	return retval;
}
#endif /* !COMPR_CODEC_CAPS_OVERFLOW */

int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
{
	struct snd_dma_buffer *dmab;
	int ret;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return -ENOMEM;
	dmab->dev = stream->dma_buffer.dev;
	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
	if (ret < 0) {
		kfree(dmab);
		return ret;
	}

	snd_compr_set_runtime_buffer(stream, dmab);
	stream->runtime->dma_bytes = size;
	return 1;
}
EXPORT_SYMBOL(snd_compr_malloc_pages);
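
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that wants the core to manage a DMA ring buffer is expected to fill in
 * stream->dma_buffer.dev before calling this helper, typically from its
 * set_params() callback, e.g.:
 *
 *	stream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
 *	stream->dma_buffer.dev.dev = my_dev;	// hypothetical struct device
 *	ret = snd_compr_malloc_pages(stream, buffer_size);
 *	if (ret < 0)
 *		return ret;
 *
 * snd_compr_free_pages() below releases a buffer allocated this way.
 */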

int snd_compr_free_pages(struct snd_compr_stream *stream)
{
	struct snd_compr_runtime *runtime;

	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
		return -EINVAL;
	runtime = stream->runtime;
	if (runtime->dma_area == NULL)
		return 0;
	if (runtime->dma_buffer_p != &stream->dma_buffer) {
		/* It's a newly allocated buffer. Release it now. */
		snd_dma_free_pages(runtime->dma_buffer_p);
		kfree(runtime->dma_buffer_p);
	}

	snd_compr_set_runtime_buffer(stream, NULL);
	return 0;
}
EXPORT_SYMBOL(snd_compr_free_pages);

/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
		struct snd_compr_params *params)
{
	unsigned int buffer_size;
	void *buffer = NULL;

	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
	if (stream->ops->copy) {
		buffer = NULL;
		/* if copy is defined the driver will be required to copy
		 * the data from core
		 */
	} else {
		if (stream->runtime->dma_buffer_p) {

			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
				dev_err(stream->device->dev,
						"Not enough DMA buffer");
			else
				buffer = stream->runtime->dma_buffer_p->area;

		} else {
			buffer = kmalloc(buffer_size, GFP_KERNEL);
		}

		if (!buffer)
			return -ENOMEM;
	}
	stream->runtime->fragment_size = params->buffer.fragment_size;
	stream->runtime->fragments = params->buffer.fragments;
	stream->runtime->buffer = buffer;
	stream->runtime->buffer_size = buffer_size;
	return 0;
}
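
/*
 * For example (numbers purely illustrative): params->buffer.fragment_size =
 * 32768 with params->buffer.fragments = 4 gives a 131072 byte ring buffer in
 * snd_compr_allocate_buffer() above; fragment_size is also the wakeup
 * threshold used by snd_compr_poll().
 */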

static int snd_compress_check_input(struct snd_compr_params *params)
{
	/* first let's check the buffer parameters */
	if (params->buffer.fragment_size == 0 ||
	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
	    params->buffer.fragments == 0)
		return -EINVAL;
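	/*
	 * The check above also rejects fragment_size * fragments overflowing
	 * a 32-bit size: e.g. (values purely illustrative) fragment_size =
	 * 0x10000 with fragments = 0x10000 would wrap to 0, so it is refused
	 * before snd_compr_allocate_buffer() does the multiplication.
	 */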

	/* now codec parameters */
	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
		return -EINVAL;

	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
		return -EINVAL;

	return 0;
}

static int
snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_params *params;
	int retval;

	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
		/*
		 * we should allow parameter changes only right after open, or
		 * when userspace has signalled the next track in a gapless
		 * sequence; not in other cases
		 */
		params = memdup_user((void __user *)arg, sizeof(*params));
		if (IS_ERR(params))
			return PTR_ERR(params);

		retval = snd_compress_check_input(params);
		if (retval)
			goto out;

		retval = snd_compr_allocate_buffer(stream, params);
		if (retval) {
			retval = -ENOMEM;
			goto out;
		}

		retval = stream->ops->set_params(stream, params);
		if (retval)
			goto out;

		if (stream->next_track)
			goto out;

		stream->metadata_set = false;
		stream->next_track = false;

		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
	} else {
		return -EPERM;
	}
out:
	kfree(params);
	return retval;
}

static int
snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_codec *params;
	int retval;

	if (!stream->ops->get_params)
		return -EBADFD;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;
	retval = stream->ops->get_params(stream, params);
	if (retval)
		goto out;
	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
		retval = -EFAULT;

out:
	kfree(params);
	return retval;
}

static int
snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->get_metadata)
		return -ENXIO;

	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;

	retval = stream->ops->get_metadata(stream, &metadata);
	if (retval != 0)
		return retval;

	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
		return -EFAULT;

	return 0;
}

static int
snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_metadata metadata;
	int retval;

	if (!stream->ops->set_metadata)
		return -ENXIO;
	/*
	 * we should allow parameter change only when stream has been
	 * opened, not in other cases
	 */
	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
		return -EFAULT;

	retval = stream->ops->set_metadata(stream, &metadata);
	stream->metadata_set = true;

	return retval;
}

static inline int
snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_tstamp tstamp = {0};
	int ret;

	ret = snd_compr_update_tstamp(stream, &tstamp);
	if (ret == 0)
		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
			&tstamp, sizeof(tstamp)) ? -EFAULT : 0;
	return ret;
}

static int snd_compr_pause(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_RUNNING:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->device->use_pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
		if (!retval)
			stream->pause_in_draining = true;
		break;
	default:
		return -EPERM;
	}
	return retval;
}

static int snd_compr_resume(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_PAUSED:
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
		break;
	case SNDRV_PCM_STATE_DRAINING:
		if (!stream->pause_in_draining)
			return -EPERM;
		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
		if (!retval)
			stream->pause_in_draining = false;
		break;
	default:
		return -EPERM;
	}
	return retval;
}

static int snd_compr_start(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_SETUP:
		if (stream->direction != SND_COMPRESS_CAPTURE)
			return -EPERM;
		break;
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		return -EPERM;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
	if (!retval)
		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
	return retval;
}

static int snd_compr_stop(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		return -EPERM;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	if (!retval) {
		/* clear flags and stop any drain wait */
		stream->partial_drain = false;
		stream->metadata_set = false;
		stream->pause_in_draining = false;
		snd_compr_drain_notify(stream);
		stream->runtime->total_bytes_available = 0;
		stream->runtime->total_bytes_transferred = 0;
	}
	return retval;
}

static void error_delayed_work(struct work_struct *work)
{
	struct snd_compr_stream *stream;

	stream = container_of(work, struct snd_compr_stream, error_work.work);

	mutex_lock(&stream->device->lock);

	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
	wake_up(&stream->runtime->sleep);

	mutex_unlock(&stream->device->lock);
}

/**
 * snd_compr_stop_error: Report a fatal error on a stream
 * @stream: pointer to stream
 * @state: state to transition the stream to
 *
 * Stop the stream and set its state.
 *
 * Should be called with compressed device lock held.
 *
 * Return: zero if successful, or a negative error code
 */
int snd_compr_stop_error(struct snd_compr_stream *stream,
			 snd_pcm_state_t state)
{
	if (stream->runtime->state == state)
		return 0;

	stream->runtime->state = state;

	pr_debug("Changing state to: %d\n", state);

	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_compr_stop_error);
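
/*
 * Usage sketch (hypothetical driver code): a driver that detects a fatal
 * DSP/firmware error while holding the device lock could report it with
 *
 *	snd_compr_stop_error(stream, SNDRV_PCM_STATE_XRUN);
 *
 * which records the new state and defers the actual STOP trigger and
 * runtime->sleep wakeup to error_delayed_work() above.
 */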

static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
	int ret;

	/*
	 * We are called with lock held. So drop the lock while we wait for
	 * drain complete notification from the driver
	 *
	 * It is expected that driver will notify the drain completion and then
	 * stream will be moved to SETUP state, even if draining resulted in an
	 * error. We can trigger next track after this.
	 */
	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
	mutex_unlock(&stream->device->lock);

	/* we wait here for the drain to complete; the wait can return because
	 * it was interrupted by a signal, because it failed, or on success.
	 * For the first two cases we don't do anything different here and
	 * return after waking up
	 */

	ret = wait_event_interruptible(stream->runtime->sleep,
			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
	if (ret == -ERESTARTSYS)
		pr_debug("wait aborted by a signal\n");
	else if (ret)
		pr_debug("wait for drain failed with %d\n", ret);

	wake_up(&stream->runtime->sleep);
	mutex_lock(&stream->device->lock);

	return ret;
}

static int snd_compr_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
	if (retval) {
		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	return snd_compress_wait_for_drain(stream);
}

static int snd_compr_next_track(struct snd_compr_stream *stream)
{
	int retval;

	/* only a running stream can transition to next track */
	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
		return -EPERM;

	/* next track doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* you can signal next track if this is intended to be a gapless stream
	 * and current track metadata is set
	 */
	if (stream->metadata_set == false)
		return -EPERM;

	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
	if (retval != 0)
		return retval;
	stream->metadata_set = false;
	stream->next_track = true;
	return 0;
}

static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	/* partial drain doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* stream can be drained only when next track has been signalled */
	if (stream->next_track == false)
		return -EPERM;

	stream->partial_drain = true;
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
	if (retval) {
		pr_debug("Partial drain returned failure\n");
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	stream->next_track = false;
	return snd_compress_wait_for_drain(stream);
}
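
/*
 * For context: the expected gapless playback sequence from userspace is
 * roughly (see the ALSA compress-offload documentation for the authoritative
 * description) to send SNDRV_COMPRESS_SET_METADATA for track N+1 while track
 * N is still playing, then SNDRV_COMPRESS_NEXT_TRACK, then
 * SNDRV_COMPRESS_PARTIAL_DRAIN, and start writing track N+1 data once the
 * partial drain returns.
 */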

static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	int retval = -ENOTTY;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;

	mutex_lock(&stream->device->lock);
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
		retval = put_user(SNDRV_COMPRESS_VERSION,
				(int __user *)arg) ? -EFAULT : 0;
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
		retval = snd_compr_get_caps(stream, arg);
		break;
#ifndef COMPR_CODEC_CAPS_OVERFLOW
	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
		retval = snd_compr_get_codec_caps(stream, arg);
		break;
#endif
	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
		retval = snd_compr_set_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
		retval = snd_compr_get_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
		retval = snd_compr_set_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
		retval = snd_compr_get_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
		retval = snd_compr_tstamp(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
		retval = snd_compr_ioctl_avail(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
		retval = snd_compr_pause(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_RESUME):
		retval = snd_compr_resume(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_START):
		retval = snd_compr_start(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_STOP):
		retval = snd_compr_stop(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
		retval = snd_compr_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
		retval = snd_compr_partial_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
		retval = snd_compr_next_track(stream);
		break;

	}
	mutex_unlock(&stream->device->lock);
	return retval;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations snd_compr_file_ops = {
		.owner =	THIS_MODULE,
		.open =		snd_compr_open,
		.release =	snd_compr_free,
		.write =	snd_compr_write,
		.read =		snd_compr_read,
		.unlocked_ioctl = snd_compr_ioctl,
#ifdef CONFIG_COMPAT
		.compat_ioctl = snd_compr_ioctl_compat,
#endif
		.mmap =		snd_compr_mmap,
		.poll =		snd_compr_poll,
};

static int snd_compress_dev_register(struct snd_device *device)
{
	int ret;
	struct snd_compr *compr;

	if (snd_BUG_ON(!device || !device->device_data))
		return -EBADFD;
	compr = device->device_data;

	pr_debug("reg device %s, direction %d\n", compr->name,
			compr->direction);
	/* register compressed device */
	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
				  compr->card, compr->device,
				  &snd_compr_file_ops, compr, compr->dev);
	if (ret < 0) {
		pr_err("snd_register_device failed %d\n", ret);
		return ret;
	}
	return ret;

}

static int snd_compress_dev_disconnect(struct snd_device *device)
{
	struct snd_compr *compr;

	compr = device->device_data;
	snd_unregister_device(compr->dev);
	return 0;
}

#ifdef CONFIG_SND_VERBOSE_PROCFS
static void snd_compress_proc_info_read(struct snd_info_entry *entry,
					struct snd_info_buffer *buffer)
{
	struct snd_compr *compr = (struct snd_compr *)entry->private_data;

	snd_iprintf(buffer, "card: %d\n", compr->card->number);
	snd_iprintf(buffer, "device: %d\n", compr->device);
	snd_iprintf(buffer, "stream: %s\n",
			compr->direction == SND_COMPRESS_PLAYBACK
				? "PLAYBACK" : "CAPTURE");
	snd_iprintf(buffer, "id: %s\n", compr->id);
}

static int snd_compress_proc_init(struct snd_compr *compr)
{
	struct snd_info_entry *entry;
	char name[16];

	sprintf(name, "compr%i", compr->device);
	entry = snd_info_create_card_entry(compr->card, name,
					   compr->card->proc_root);
	if (!entry)
		return -ENOMEM;
	entry->mode = S_IFDIR | 0555;
	compr->proc_root = entry;

	entry = snd_info_create_card_entry(compr->card, "info",
					   compr->proc_root);
	if (entry)
		snd_info_set_text_ops(entry, compr,
				      snd_compress_proc_info_read);
	compr->proc_info_entry = entry;

	return 0;
}

static void snd_compress_proc_done(struct snd_compr *compr)
{
	snd_info_free_entry(compr->proc_info_entry);
	compr->proc_info_entry = NULL;
	snd_info_free_entry(compr->proc_root);
	compr->proc_root = NULL;
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
	strscpy(compr->id, id, sizeof(compr->id));
}
#else
static inline int snd_compress_proc_init(struct snd_compr *compr)
{
	return 0;
}

static inline void snd_compress_proc_done(struct snd_compr *compr)
{
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
}
#endif

static int snd_compress_dev_free(struct snd_device *device)
{
	struct snd_compr *compr;

	compr = device->device_data;
	snd_compress_proc_done(compr);
	put_device(compr->dev);
	return 0;
}

/**
 * snd_compress_new: create new compress device
 * @card: sound card pointer
 * @device: device number
 * @dirn: device direction, should be of type enum snd_compr_direction
 * @id: ID string
 * @compr: compress device pointer
 *
 * Return: zero if successful, or a negative error code
 */
int snd_compress_new(struct snd_card *card, int device,
			int dirn, const char *id, struct snd_compr *compr)
{
	static const struct snd_device_ops ops = {
		.dev_free = snd_compress_dev_free,
		.dev_register = snd_compress_dev_register,
		.dev_disconnect = snd_compress_dev_disconnect,
	};
	int ret;

	compr->card = card;
	compr->device = device;
	compr->direction = dirn;
	mutex_init(&compr->lock);

	snd_compress_set_id(compr, id);

	ret = snd_device_alloc(&compr->dev, card);
	if (ret)
		return ret;
	dev_set_name(compr->dev, "comprC%iD%i", card->number, device);

	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
	if (ret == 0)
		snd_compress_proc_init(compr);
	else
		put_device(compr->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_compress_new);
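
/*
 * Usage sketch (hypothetical driver code): a card driver allocates a struct
 * snd_compr, points it at its compress ops and private data, and registers
 * it while building the card, e.g.:
 *
 *	compr->ops = &my_compr_ops;		// hypothetical ops table
 *	compr->private_data = my_data;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK, "my-compr",
 *			       compr);
 *
 * The resulting node appears as /dev/snd/comprC<card>D<device> and is served
 * by the file operations defined in this file.
 */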

MODULE_DESCRIPTION("ALSA Compressed offload framework");
MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
MODULE_LICENSE("GPL v2");