xref: /kernel/linux/linux-5.10/drivers/iio/inkern.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/* The industrial I/O core in kernel channel mapping
3 *
4 * Copyright (c) 2011 Jonathan Cameron
5 */
6#include <linux/err.h>
7#include <linux/export.h>
8#include <linux/slab.h>
9#include <linux/mutex.h>
10#include <linux/of.h>
11
12#include <linux/iio/iio.h>
13#include "iio_core.h"
14#include <linux/iio/machine.h>
15#include <linux/iio/driver.h>
16#include <linux/iio/consumer.h>
17
/*
 * iio_map_internal - internal bookkeeping for one registered consumer map
 * @indio_dev: provider IIO device this map entry resolves to
 * @map: driver-supplied map entry (not copied; must outlive registration)
 * @l: node in the global iio_map_list
 */
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

/* Global list of registered consumer maps, protected by iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
26
27int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
28{
29	int i = 0, ret = 0;
30	struct iio_map_internal *mapi;
31
32	if (maps == NULL)
33		return 0;
34
35	mutex_lock(&iio_map_list_lock);
36	while (maps[i].consumer_dev_name != NULL) {
37		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
38		if (mapi == NULL) {
39			ret = -ENOMEM;
40			goto error_ret;
41		}
42		mapi->map = &maps[i];
43		mapi->indio_dev = indio_dev;
44		list_add_tail(&mapi->l, &iio_map_list);
45		i++;
46	}
47error_ret:
48	mutex_unlock(&iio_map_list_lock);
49
50	return ret;
51}
52EXPORT_SYMBOL_GPL(iio_map_array_register);
53
54
55/*
56 * Remove all map entries associated with the given iio device
57 */
58int iio_map_array_unregister(struct iio_dev *indio_dev)
59{
60	int ret = -ENODEV;
61	struct iio_map_internal *mapi, *next;
62
63	mutex_lock(&iio_map_list_lock);
64	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
65		if (indio_dev == mapi->indio_dev) {
66			list_del(&mapi->l);
67			kfree(mapi);
68			ret = 0;
69		}
70	}
71	mutex_unlock(&iio_map_list_lock);
72	return ret;
73}
74EXPORT_SYMBOL_GPL(iio_map_array_unregister);
75
76static const struct iio_chan_spec
77*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
78{
79	int i;
80	const struct iio_chan_spec *chan = NULL;
81
82	for (i = 0; i < indio_dev->num_channels; i++)
83		if (indio_dev->channels[i].datasheet_name &&
84		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
85			chan = &indio_dev->channels[i];
86			break;
87		}
88	return chan;
89}
90
91#ifdef CONFIG_OF
92
93static int iio_dev_node_match(struct device *dev, const void *data)
94{
95	return dev->of_node == data && dev->type == &iio_device_type;
96}
97
98/**
99 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
100 * @indio_dev:	pointer to the iio_dev structure
101 * @iiospec:	IIO specifier as found in the device tree
102 *
103 * This is simple translation function, suitable for the most 1:1 mapped
104 * channels in IIO chips. This function performs only one sanity check:
105 * whether IIO index is less than num_channels (that is specified in the
106 * iio_dev).
107 */
108static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
109				const struct of_phandle_args *iiospec)
110{
111	if (!iiospec->args_count)
112		return 0;
113
114	if (iiospec->args[0] >= indio_dev->num_channels) {
115		dev_err(&indio_dev->dev, "invalid channel index %u\n",
116			iiospec->args[0]);
117		return -EINVAL;
118	}
119
120	return iiospec->args[0];
121}
122
/*
 * __of_iio_channel_get() - resolve one "io-channels" phandle into @channel
 * @channel: caller-allocated iio_channel to fill in
 * @np: consumer device node carrying the "io-channels" property
 * @index: which phandle within the property to resolve
 *
 * On success, @channel holds a reference on the provider iio_dev (taken
 * via bus_find_device(); dropped by the caller with iio_device_put() when
 * the channel is released).  Returns 0 on success, -EPROBE_DEFER if the
 * provider device has not registered yet, or a negative errno from
 * phandle parsing / specifier translation.
 */
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	/* bus_find_device() takes a reference on the matched device. */
	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	if (idev == NULL) {
		of_node_put(iiospec.np);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	/* Translate the DT specifier into a channel index. */
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	of_node_put(iiospec.np);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	/* Drop the reference taken by bus_find_device() above. */
	iio_device_put(indio_dev);
	return index;
}
161
162static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
163{
164	struct iio_channel *channel;
165	int err;
166
167	if (index < 0)
168		return ERR_PTR(-EINVAL);
169
170	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
171	if (channel == NULL)
172		return ERR_PTR(-ENOMEM);
173
174	err = __of_iio_channel_get(channel, np, index);
175	if (err)
176		goto err_free_channel;
177
178	return channel;
179
180err_free_channel:
181	kfree(channel);
182	return ERR_PTR(err);
183}
184
/*
 * of_iio_channel_get_by_name() - find a named (or first) IIO channel for @np
 * @np: consumer device node to start the search from
 * @name: channel name to match against "io-channel-names", or NULL for
 *	  the first "io-channels" entry
 *
 * Walks from @np towards the root, continuing only through parents that
 * set "io-channel-ranges".  Returns the channel, NULL if none was found,
 * or ERR_PTR(-EPROBE_DEFER) if the provider is not ready yet.
 */
static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			/* Named entry exists here but could not be resolved. */
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
				np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has a "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	/* NULL when the walk exhausted the tree; may be an ERR_PTR. */
	return chan;
}
224
/*
 * of_iio_channel_get_all() - resolve every "io-channels" entry of @dev
 * @dev: consumer device whose of_node is inspected
 *
 * Returns a NULL-terminated array of channels (each holding a provider
 * reference), NULL when the node has no "io-channels" property (so the
 * caller can fall back to the map table), or an ERR_PTR on failure.
 */
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	/* Count the phandles by probing successive indices until one fails. */
	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	/* Entries [0, mapind) were resolved and hold device references. */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}
263
264#else /* CONFIG_OF */
265
/* CONFIG_OF disabled: no DT lookup; callers fall back to the map table. */
static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}
271
/* CONFIG_OF disabled: no DT lookup; callers fall back to the map table. */
static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}
276
277#endif /* CONFIG_OF */
278
/*
 * iio_channel_get_sys() - look up a channel through the registered map table
 * @name: consumer device name to match, or NULL to match any
 * @channel_name: consumer channel name to match, or NULL to match any
 *
 * The returned channel holds a reference on the provider iio_dev, dropped
 * by iio_channel_release().  Returns ERR_PTR(-ENODEV) when no map entry
 * matches, -ENOMEM on allocation failure, or -EINVAL when the map names a
 * datasheet channel the provider does not expose.
 */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		/* Pin the provider while we still hold the list lock. */
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	/* Optionally narrow the channel via the map's datasheet label. */
	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	/* Balance the iio_device_get() taken above. */
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
331
332struct iio_channel *iio_channel_get(struct device *dev,
333				    const char *channel_name)
334{
335	const char *name = dev ? dev_name(dev) : NULL;
336	struct iio_channel *channel;
337
338	if (dev) {
339		channel = of_iio_channel_get_by_name(dev->of_node,
340						     channel_name);
341		if (channel != NULL)
342			return channel;
343	}
344
345	return iio_channel_get_sys(name, channel_name);
346}
347EXPORT_SYMBOL_GPL(iio_channel_get);
348
349void iio_channel_release(struct iio_channel *channel)
350{
351	if (!channel)
352		return;
353	iio_device_put(channel->indio_dev);
354	kfree(channel);
355}
356EXPORT_SYMBOL_GPL(iio_channel_release);
357
/* devres destructor: release the managed channel stored in @res. */
static void devm_iio_channel_free(struct device *dev, void *res)
{
	iio_channel_release(*(struct iio_channel **)res);
}
364
365struct iio_channel *devm_iio_channel_get(struct device *dev,
366					 const char *channel_name)
367{
368	struct iio_channel **ptr, *channel;
369
370	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
371	if (!ptr)
372		return ERR_PTR(-ENOMEM);
373
374	channel = iio_channel_get(dev, channel_name);
375	if (IS_ERR(channel)) {
376		devres_free(ptr);
377		return channel;
378	}
379
380	*ptr = channel;
381	devres_add(dev, ptr);
382
383	return channel;
384}
385EXPORT_SYMBOL_GPL(devm_iio_channel_get);
386
387struct iio_channel *iio_channel_get_all(struct device *dev)
388{
389	const char *name;
390	struct iio_channel *chans;
391	struct iio_map_internal *c = NULL;
392	int nummaps = 0;
393	int mapind = 0;
394	int i, ret;
395
396	if (dev == NULL)
397		return ERR_PTR(-EINVAL);
398
399	chans = of_iio_channel_get_all(dev);
400	if (chans)
401		return chans;
402
403	name = dev_name(dev);
404
405	mutex_lock(&iio_map_list_lock);
406	/* first count the matching maps */
407	list_for_each_entry(c, &iio_map_list, l)
408		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
409			continue;
410		else
411			nummaps++;
412
413	if (nummaps == 0) {
414		ret = -ENODEV;
415		goto error_ret;
416	}
417
418	/* NULL terminated array to save passing size */
419	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
420	if (chans == NULL) {
421		ret = -ENOMEM;
422		goto error_ret;
423	}
424
425	/* for each map fill in the chans element */
426	list_for_each_entry(c, &iio_map_list, l) {
427		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
428			continue;
429		chans[mapind].indio_dev = c->indio_dev;
430		chans[mapind].data = c->map->consumer_data;
431		chans[mapind].channel =
432			iio_chan_spec_from_name(chans[mapind].indio_dev,
433						c->map->adc_channel_label);
434		if (chans[mapind].channel == NULL) {
435			ret = -EINVAL;
436			goto error_free_chans;
437		}
438		iio_device_get(chans[mapind].indio_dev);
439		mapind++;
440	}
441	if (mapind == 0) {
442		ret = -ENODEV;
443		goto error_free_chans;
444	}
445	mutex_unlock(&iio_map_list_lock);
446
447	return chans;
448
449error_free_chans:
450	for (i = 0; i < nummaps; i++)
451		iio_device_put(chans[i].indio_dev);
452	kfree(chans);
453error_ret:
454	mutex_unlock(&iio_map_list_lock);
455
456	return ERR_PTR(ret);
457}
458EXPORT_SYMBOL_GPL(iio_channel_get_all);
459
460void iio_channel_release_all(struct iio_channel *channels)
461{
462	struct iio_channel *chan = &channels[0];
463
464	while (chan->indio_dev) {
465		iio_device_put(chan->indio_dev);
466		chan++;
467	}
468	kfree(channels);
469}
470EXPORT_SYMBOL_GPL(iio_channel_release_all);
471
/* devres destructor: release the managed channel array stored in @res. */
static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	iio_channel_release_all(*(struct iio_channel **)res);
}
478
479struct iio_channel *devm_iio_channel_get_all(struct device *dev)
480{
481	struct iio_channel **ptr, *channels;
482
483	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
484	if (!ptr)
485		return ERR_PTR(-ENOMEM);
486
487	channels = iio_channel_get_all(dev);
488	if (IS_ERR(channels)) {
489		devres_free(ptr);
490		return channels;
491	}
492
493	*ptr = channels;
494	devres_add(dev, ptr);
495
496	return channels;
497}
498EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
499
/*
 * iio_channel_read() - read one channel attribute from the provider driver
 * @chan: channel to read
 * @val: integer part of the result
 * @val2: fractional part; may be NULL when the caller only needs @val
 * @info: which attribute (raw, scale, offset, ...) to read
 *
 * All callers in this file hold info_exist_lock and have verified
 * indio_dev->info before calling.  Returns the driver's IIO_VAL_* type
 * code on success or a negative errno.
 */
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	/* Prefer the multi-element callback when the driver provides one. */
	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		/*
		 * NOTE(review): vals[] is copied out even when ret < 0, so
		 * the outputs are indeterminate on error; callers only
		 * consume them after checking ret.
		 */
		*val = vals[0];
		*val2 = vals[1];
	} else
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);

	return ret;
}
526
527int iio_read_channel_raw(struct iio_channel *chan, int *val)
528{
529	int ret;
530
531	mutex_lock(&chan->indio_dev->info_exist_lock);
532	if (chan->indio_dev->info == NULL) {
533		ret = -ENODEV;
534		goto err_unlock;
535	}
536
537	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
538err_unlock:
539	mutex_unlock(&chan->indio_dev->info_exist_lock);
540
541	return ret;
542}
543EXPORT_SYMBOL_GPL(iio_read_channel_raw);
544
545int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
546{
547	int ret;
548
549	mutex_lock(&chan->indio_dev->info_exist_lock);
550	if (chan->indio_dev->info == NULL) {
551		ret = -ENODEV;
552		goto err_unlock;
553	}
554
555	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
556err_unlock:
557	mutex_unlock(&chan->indio_dev->info_exist_lock);
558
559	return ret;
560}
561EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
562
563static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
564	int raw, int *processed, unsigned int scale)
565{
566	int scale_type, scale_val, scale_val2;
567	int offset_type, offset_val, offset_val2;
568	s64 raw64 = raw;
569
570	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
571				       IIO_CHAN_INFO_OFFSET);
572	if (offset_type >= 0) {
573		switch (offset_type) {
574		case IIO_VAL_INT:
575			break;
576		case IIO_VAL_INT_PLUS_MICRO:
577		case IIO_VAL_INT_PLUS_NANO:
578			/*
579			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
580			 * implicitely truncate the offset to it's integer form.
581			 */
582			break;
583		case IIO_VAL_FRACTIONAL:
584			offset_val /= offset_val2;
585			break;
586		case IIO_VAL_FRACTIONAL_LOG2:
587			offset_val >>= offset_val2;
588			break;
589		default:
590			return -EINVAL;
591		}
592
593		raw64 += offset_val;
594	}
595
596	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
597					IIO_CHAN_INFO_SCALE);
598	if (scale_type < 0) {
599		/*
600		 * If no channel scaling is available apply consumer scale to
601		 * raw value and return.
602		 */
603		*processed = raw * scale;
604		return 0;
605	}
606
607	switch (scale_type) {
608	case IIO_VAL_INT:
609		*processed = raw64 * scale_val * scale;
610		break;
611	case IIO_VAL_INT_PLUS_MICRO:
612		if (scale_val2 < 0)
613			*processed = -raw64 * scale_val;
614		else
615			*processed = raw64 * scale_val;
616		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
617				      1000000LL);
618		break;
619	case IIO_VAL_INT_PLUS_NANO:
620		if (scale_val2 < 0)
621			*processed = -raw64 * scale_val;
622		else
623			*processed = raw64 * scale_val;
624		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
625				      1000000000LL);
626		break;
627	case IIO_VAL_FRACTIONAL:
628		*processed = div_s64(raw64 * (s64)scale_val * scale,
629				     scale_val2);
630		break;
631	case IIO_VAL_FRACTIONAL_LOG2:
632		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
633		break;
634	default:
635		return -EINVAL;
636	}
637
638	return 0;
639}
640
641int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
642	int *processed, unsigned int scale)
643{
644	int ret;
645
646	mutex_lock(&chan->indio_dev->info_exist_lock);
647	if (chan->indio_dev->info == NULL) {
648		ret = -ENODEV;
649		goto err_unlock;
650	}
651
652	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
653							scale);
654err_unlock:
655	mutex_unlock(&chan->indio_dev->info_exist_lock);
656
657	return ret;
658}
659EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
660
661int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
662			       enum iio_chan_info_enum attribute)
663{
664	int ret;
665
666	mutex_lock(&chan->indio_dev->info_exist_lock);
667	if (chan->indio_dev->info == NULL) {
668		ret = -ENODEV;
669		goto err_unlock;
670	}
671
672	ret = iio_channel_read(chan, val, val2, attribute);
673err_unlock:
674	mutex_unlock(&chan->indio_dev->info_exist_lock);
675
676	return ret;
677}
678EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
679
/*
 * iio_read_channel_offset() - read the channel's offset attribute
 * @chan: channel to query
 * @val: integer part of the offset
 * @val2: fractional part (may be NULL)
 *
 * Returns the IIO_VAL_* type code on success or a negative errno.
 */
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
685
/*
 * iio_read_channel_processed() - read a fully-processed channel value
 * @chan: channel to read
 * @val: result in processed units
 *
 * Uses the driver's PROCESSED attribute when available, otherwise reads
 * the raw value and applies offset/scale in software (consumer scale 1).
 * Returns >= 0 on success or a negative errno.
 */
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		/* Underlying driver has gone away. */
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		/* Convert in place; *val already holds the raw reading. */
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
712
/*
 * iio_read_channel_scale() - read the channel's scale attribute
 * @chan: channel to query
 * @val: integer part of the scale
 * @val2: fractional part (may be NULL)
 *
 * Returns the IIO_VAL_* type code on success or a negative errno.
 */
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
718
719static int iio_channel_read_avail(struct iio_channel *chan,
720				  const int **vals, int *type, int *length,
721				  enum iio_chan_info_enum info)
722{
723	if (!iio_channel_has_available(chan->channel, info))
724		return -EINVAL;
725
726	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
727						 vals, type, length, info);
728}
729
730int iio_read_avail_channel_attribute(struct iio_channel *chan,
731				     const int **vals, int *type, int *length,
732				     enum iio_chan_info_enum attribute)
733{
734	int ret;
735
736	mutex_lock(&chan->indio_dev->info_exist_lock);
737	if (!chan->indio_dev->info) {
738		ret = -ENODEV;
739		goto err_unlock;
740	}
741
742	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
743err_unlock:
744	mutex_unlock(&chan->indio_dev->info_exist_lock);
745
746	return ret;
747}
748EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
749
750int iio_read_avail_channel_raw(struct iio_channel *chan,
751			       const int **vals, int *length)
752{
753	int ret;
754	int type;
755
756	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
757					 IIO_CHAN_INFO_RAW);
758
759	if (ret >= 0 && type != IIO_VAL_INT)
760		/* raw values are assumed to be IIO_VAL_INT */
761		ret = -EINVAL;
762
763	return ret;
764}
765EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
766
/*
 * iio_channel_read_max() - find the maximum available value for an attribute
 * @chan: channel to query
 * @val: integer part of the maximum
 * @val2: fractional part; may be NULL
 * @type: set to the IIO_VAL_* type of the returned value
 * @info: attribute whose maximum is sought
 *
 * Returns 0 on success, -EINVAL for unsupported layouts, or the negative
 * errno / unexpected code from iio_channel_read_avail().
 */
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		/*
		 * A range is [min, step, max]; for IIO_VAL_INT each element
		 * is one int (max at vals[2]), otherwise each element is an
		 * integer/fraction pair (max at vals[4]/vals[5]).
		 */
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			/* Linear scan for the largest entry in the list. */
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		/* Propagate errors (or unexpected availability codes). */
		return ret;
	}
}
813
814int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
815{
816	int ret;
817	int type;
818
819	mutex_lock(&chan->indio_dev->info_exist_lock);
820	if (!chan->indio_dev->info) {
821		ret = -ENODEV;
822		goto err_unlock;
823	}
824
825	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
826err_unlock:
827	mutex_unlock(&chan->indio_dev->info_exist_lock);
828
829	return ret;
830}
831EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
832
833int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
834{
835	int ret = 0;
836	/* Need to verify underlying driver has not gone away */
837
838	mutex_lock(&chan->indio_dev->info_exist_lock);
839	if (chan->indio_dev->info == NULL) {
840		ret = -ENODEV;
841		goto err_unlock;
842	}
843
844	*type = chan->channel->type;
845err_unlock:
846	mutex_unlock(&chan->indio_dev->info_exist_lock);
847
848	return ret;
849}
850EXPORT_SYMBOL_GPL(iio_get_channel_type);
851
/*
 * iio_channel_write() - forward a write to the provider's write_raw callback.
 * Callers hold info_exist_lock and have verified indio_dev->info.
 */
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}
858
859int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
860				enum iio_chan_info_enum attribute)
861{
862	int ret;
863
864	mutex_lock(&chan->indio_dev->info_exist_lock);
865	if (chan->indio_dev->info == NULL) {
866		ret = -ENODEV;
867		goto err_unlock;
868	}
869
870	ret = iio_channel_write(chan, val, val2, attribute);
871err_unlock:
872	mutex_unlock(&chan->indio_dev->info_exist_lock);
873
874	return ret;
875}
876EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
877
/*
 * iio_write_channel_raw() - write a raw value to the channel
 * @chan: channel to write
 * @val: raw value (fractional part is 0)
 *
 * Returns 0 or a negative errno.
 */
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
883
884unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
885{
886	const struct iio_chan_spec_ext_info *ext_info;
887	unsigned int i = 0;
888
889	if (!chan->channel->ext_info)
890		return i;
891
892	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
893		++i;
894
895	return i;
896}
897EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
898
899static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
900						const struct iio_channel *chan,
901						const char *attr)
902{
903	const struct iio_chan_spec_ext_info *ext_info;
904
905	if (!chan->channel->ext_info)
906		return NULL;
907
908	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
909		if (!strcmp(attr, ext_info->name))
910			return ext_info;
911	}
912
913	return NULL;
914}
915
916ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
917				  const char *attr, char *buf)
918{
919	const struct iio_chan_spec_ext_info *ext_info;
920
921	ext_info = iio_lookup_ext_info(chan, attr);
922	if (!ext_info)
923		return -EINVAL;
924
925	return ext_info->read(chan->indio_dev, ext_info->private,
926			      chan->channel, buf);
927}
928EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
929
930ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
931				   const char *buf, size_t len)
932{
933	const struct iio_chan_spec_ext_info *ext_info;
934
935	ext_info = iio_lookup_ext_info(chan, attr);
936	if (!ext_info)
937		return -EINVAL;
938
939	return ext_info->write(chan->indio_dev, ext_info->private,
940			       chan->channel, buf, len);
941}
942EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
943