// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
			      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
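
/*
 * From user space (an illustrative sketch only, not part of this file), a
 * blocking reader pulls whole scans from the buffer character device:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[64];		// sized to the device's scan_bytes by the app
 *	ssize_t n = read(fd, scan, sizeof(scan));
 *
 * A blocking read() returns once min(requested samples, watermark) samples
 * are available; with O_NONBLOCK it fails with EAGAIN instead of sleeping
 * when the buffer is empty.
 */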

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
__poll_t iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the buffer waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	wake_up(&buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
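
/*
 * Buffer implementations call iio_buffer_init() on the iio_buffer they embed
 * before handing it to the core. A rough sketch (hypothetical backend names;
 * real backends live e.g. in kfifo_buf.c):
 *
 *	struct my_kbuf {
 *		struct iio_buffer buffer;	// kept as the first member
 *		...
 *	};
 *
 *	struct my_kbuf *kb = kzalloc(sizeof(*kb), GFP_KERNEL);
 *	if (!kb)
 *		return NULL;
 *	iio_buffer_init(&kb->buffer);
 *	kb->buffer.access = &my_buffer_access_funcs;
 *
 * The "first member" expectation comes from iio_buffer_read_outer() above.
 */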

/**
 * iio_buffer_set_attrs - Set buffer specific attributes
 * @buffer: The buffer for which we are setting attributes
 * @attrs: Pointer to a null terminated list of pointers to attributes
 */
void iio_buffer_set_attrs(struct iio_buffer *buffer,
			 const struct attribute **attrs)
{
	buffer->attrs = attrs;
}
EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
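
/*
 * Example (hypothetical driver code, sketch only): a driver exposing an extra
 * attribute in its buffer directory might do
 *
 *	static const struct attribute *my_buffer_attrs[] = {
 *		&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
 *		NULL,
 *	};
 *
 *	iio_buffer_set_attrs(indio_dev->buffer, my_buffer_attrs);
 *
 * The array must be NULL terminated and must stay valid for the lifetime of
 * the buffer.
 */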

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

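/*
 * The *_type files under scan_elements/ follow the documented
 * [be|le]:[s|u]bits/storagebits[Xrepeat]>>shift layout, e.g. "le:s12/16>>4"
 * for a signed 12-bit little-endian value stored in 16 bits and shifted
 * right by 4 bits.
 */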
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
		       buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask,
					  bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
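
/*
 * Illustration of the matching above (assumed masks, for example only): with
 * available_scan_masks = { 0x3, 0xF, 0 } and a requested mask of 0x2, the
 * non-strict lookup returns 0x3 (the first superset of the request), while a
 * strict lookup fails because no entry equals 0x2 exactly.
 */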

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}
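
/*
 * Worked example for the computation above (assumed channel sizes, for
 * illustration only): two enabled channels of 2 and 4 storage bytes plus an
 * 8 byte timestamp lay out as ch0 at offset 0, ch1 aligned up to offset 4,
 * timestamp aligned up to offset 8, giving 16 bytes, which is already a
 * multiple of the largest element, so scan_bytes is 16.
 */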

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
			&iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
		list_is_singular(&iio_dev_opaque->buffer_list))
			return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
			insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
				    indio_dev->masklength,
				    compound_mask,
				    strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
				    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{

	if (*p && (*p)->from + (*p)->length == in_loc &&
		(*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
			       "Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
		&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state.  With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

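/**
 * iio_update_buffers() - add or remove a buffer from the set of active buffers
 * @indio_dev: the iio device
 * @insert_buffer: buffer to add to the active list, may be NULL
 * @remove_buffer: buffer to remove from the active list, may be NULL
 *
 * Tears down the existing buffering setup and rebuilds it with the updated
 * set of buffers, taking the relevant locks around __iio_update_buffers().
 */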
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, attrcount;
	const struct iio_chan_spec *channels;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	attrcount = 0;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* Build the scan element attributes for each channel */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	attrn = 0;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(buffer->buffer_group.attrs);

	return ret;
}

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	const struct iio_chan_spec *channels;
	int i;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	return __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev);
}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
{
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.attrs);
	kfree(buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	__iio_buffer_free_sysfs_and_mask(buffer);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
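
/*
 * Typical use of the helper above (driver-side sketch): a device that can
 * only sample one channel at a time plugs it into its buffer setup ops:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = my_preenable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * my_preenable and my_setup_ops are hypothetical names.
 */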

static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:		iio_dev structure for device.
 * @data:		Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
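
/*
 * The usual caller is a driver's trigger handler, most often through the
 * iio_push_to_buffers_with_timestamp() wrapper. A sketch with hypothetical
 * driver names:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		struct my_state *st = iio_priv(indio_dev);
 *
 *		my_read_scan(st, st->scan);
 *		iio_push_to_buffers_with_timestamp(indio_dev, st->scan,
 *						   iio_get_time_ns(indio_dev));
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */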

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. The function should only be called
 * at most once per device.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	indio_dev->buffer = iio_buffer_get(buffer);
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
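
/*
 * Example (sketch): a driver using the kfifo backend would typically do
 * something like
 *
 *	buffer = iio_kfifo_allocate();
 *	if (!buffer)
 *		return -ENOMEM;
 *	iio_device_attach_buffer(indio_dev, buffer);
 *
 * and free its own reference with iio_kfifo_free() on the remove/error path;
 * the reference taken here is kept until the device itself is freed.
 */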