cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

inkern.c (23376B)


// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
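
/*
 * Example: a board file or MFD driver would typically hand this function a
 * NULL-terminated array of maps.  A minimal sketch (the device and channel
 * names here are hypothetical, not taken from this file):
 *
 *	static struct iio_map board_iio_maps[] = {
 *		IIO_MAP("channel_0", "some-consumer", "vbat"),
 *		{ }
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, board_iio_maps);
 *
 * devm_iio_map_array_register() below is the managed variant, which
 * unregisters the maps automatically when @dev goes away.
 */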


/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the common case of
 * 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * that the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
					       const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property.  If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
				np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
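
/*
 * Example device tree snippet for the lookup above (a minimal sketch; node
 * and channel names are hypothetical):
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vbat", "vddio";
 *	};
 *
 * of_iio_channel_get_by_name(np, "vbat") would then resolve to channel 0
 * of the "adc" provider via __of_iio_channel_get().
 */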

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
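
/*
 * Example consumer usage (a minimal sketch; "vbat" is a hypothetical
 * channel name that would have to match an iio_map entry or an
 * "io-channel-names" string):
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_raw(chan, &val);
 *	iio_channel_release(chan);
 *
 * Most drivers would use devm_iio_channel_get() below instead, dropping
 * the explicit iio_channel_release().
 */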

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
						    struct device_node *np,
						    const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = of_iio_channel_get_by_name(np, channel_name);
	if (IS_ERR(channel))
		return channel;
	/*
	 * of_iio_channel_get_by_name() can return NULL when no channel is
	 * found; callers of this devm wrapper only check for ERR_PTR, so
	 * map "not found" to an error code.
	 */
	if (!channel)
		return ERR_PTR(-ENODEV);

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		nummaps++;
	}

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only put the first mapind devices, which were actually gotten */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
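
/*
 * The array returned by iio_channel_get_all() is NULL-terminated: the entry
 * with a NULL indio_dev marks the end, as iio_channel_release_all() above
 * relies on.  A consumer can therefore walk it like this (sketch):
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = devm_iio_channel_get_all(&pdev->dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		dev_info(&pdev->dev, "got a channel\n");
 */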

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
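
/*
 * Worked example of the conversion above (numbers are hypothetical): for a
 * channel whose scale reads back as IIO_VAL_FRACTIONAL with
 * scale_val = 1200 and scale_val2 = 4096 (i.e. 1200/4096 mV per LSB),
 * a raw value of 2048 and a consumer scale of 1000 gives
 *
 *	processed = 2048 * 1200 * 1000 / 4096 = 600000
 */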

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
							scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
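
/*
 * Typical consumer read of a processed value (sketch; assumes "chan" was
 * obtained with iio_channel_get(), and that the processed unit fits the
 * consumer after the extra scale factor is applied):
 *
 *	int val, ret;
 *
 *	ret = iio_read_channel_processed_scale(chan, &val, 1000);
 *	if (ret < 0)
 *		return ret;
 */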

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					 IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

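/*
 * For IIO_AVAIL_RANGE the driver reports {min, step, max}.  With
 * IIO_VAL_INT each value is a single integer, so max is vals[2]; for the
 * two-integer types (e.g. IIO_VAL_INT_PLUS_MICRO) each value occupies two
 * slots and max is the vals[4]/vals[5] pair, as used below.
 */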
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
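
/*
 * Example ext_info read (sketch; attribute names are driver-specific, so
 * "calibscale_mode" here is hypothetical).  ext_info read callbacks follow
 * sysfs conventions, so the buffer should be a full page:
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *	ssize_t len;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	len = iio_read_channel_ext_info(chan, "calibscale_mode", buf);
 *	free_page((unsigned long)buf);
 */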