cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

of.c (40644B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Generic OPP OF helpers
      4 *
      5 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
      6 *	Nishanth Menon
      7 *	Romit Dasgupta
      8 *	Kevin Hilman
      9 */
     10
     11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     12
     13#include <linux/cpu.h>
     14#include <linux/errno.h>
     15#include <linux/device.h>
     16#include <linux/of_device.h>
     17#include <linux/pm_domain.h>
     18#include <linux/slab.h>
     19#include <linux/export.h>
     20#include <linux/energy_model.h>
     21
     22#include "opp.h"
     23
     24/*
     25 * Returns opp descriptor node for a device node, caller must
     26 * do of_node_put().
     27 */
     28static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
     29						     int index)
     30{
     31	/* "operating-points-v2" can be an array for power domain providers */
     32	return of_parse_phandle(np, "operating-points-v2", index);
     33}
     34
     35/* Returns opp descriptor node for a device, caller must do of_node_put() */
     36struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
     37{
     38	return _opp_of_get_opp_desc_node(dev->of_node, 0);
     39}
     40EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
     41
     42struct opp_table *_managed_opp(struct device *dev, int index)
     43{
     44	struct opp_table *opp_table, *managed_table = NULL;
     45	struct device_node *np;
     46
     47	np = _opp_of_get_opp_desc_node(dev->of_node, index);
     48	if (!np)
     49		return NULL;
     50
     51	list_for_each_entry(opp_table, &opp_tables, node) {
     52		if (opp_table->np == np) {
     53			/*
     54			 * Multiple devices can point to the same OPP table and
     55			 * so will have same node-pointer, np.
     56			 *
     57			 * But the OPPs will be considered as shared only if the
     58			 * OPP table contains a "opp-shared" property.
     59			 */
     60			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
     61				_get_opp_table_kref(opp_table);
     62				managed_table = opp_table;
     63			}
     64
     65			break;
     66		}
     67	}
     68
     69	of_node_put(np);
     70
     71	return managed_table;
     72}
     73
     74/* The caller must call dev_pm_opp_put() after the OPP is used */
     75static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
     76					  struct device_node *opp_np)
     77{
     78	struct dev_pm_opp *opp;
     79
     80	mutex_lock(&opp_table->lock);
     81
     82	list_for_each_entry(opp, &opp_table->opp_list, node) {
     83		if (opp->np == opp_np) {
     84			dev_pm_opp_get(opp);
     85			mutex_unlock(&opp_table->lock);
     86			return opp;
     87		}
     88	}
     89
     90	mutex_unlock(&opp_table->lock);
     91
     92	return NULL;
     93}
     94
     95static struct device_node *of_parse_required_opp(struct device_node *np,
     96						 int index)
     97{
     98	return of_parse_phandle(np, "required-opps", index);
     99}
    100
    101/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
    102static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
    103{
    104	struct opp_table *opp_table;
    105	struct device_node *opp_table_np;
    106
    107	opp_table_np = of_get_parent(opp_np);
    108	if (!opp_table_np)
    109		goto err;
    110
    111	/* It is safe to put the node now as all we need is its address */
    112	of_node_put(opp_table_np);
    113
    114	mutex_lock(&opp_table_lock);
    115	list_for_each_entry(opp_table, &opp_tables, node) {
    116		if (opp_table_np == opp_table->np) {
    117			_get_opp_table_kref(opp_table);
    118			mutex_unlock(&opp_table_lock);
    119			return opp_table;
    120		}
    121	}
    122	mutex_unlock(&opp_table_lock);
    123
    124err:
    125	return ERR_PTR(-ENODEV);
    126}
    127
    128/* Free resources previously acquired by _opp_table_alloc_required_tables() */
    129static void _opp_table_free_required_tables(struct opp_table *opp_table)
    130{
    131	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
    132	int i;
    133
    134	if (!required_opp_tables)
    135		return;
    136
    137	for (i = 0; i < opp_table->required_opp_count; i++) {
    138		if (IS_ERR_OR_NULL(required_opp_tables[i]))
    139			continue;
    140
    141		dev_pm_opp_put_opp_table(required_opp_tables[i]);
    142	}
    143
    144	kfree(required_opp_tables);
    145
    146	opp_table->required_opp_count = 0;
    147	opp_table->required_opp_tables = NULL;
    148	list_del(&opp_table->lazy);
    149}
    150
    151/*
    152 * Populate all devices and opp tables which are part of "required-opps" list.
    153 * Checking only the first OPP node should be enough.
    154 */
    155static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
    156					     struct device *dev,
    157					     struct device_node *opp_np)
    158{
    159	struct opp_table **required_opp_tables;
    160	struct device_node *required_np, *np;
    161	bool lazy = false;
    162	int count, i;
    163
    164	/* Traversing the first OPP node is all we need */
    165	np = of_get_next_available_child(opp_np, NULL);
    166	if (!np) {
    167		dev_warn(dev, "Empty OPP table\n");
    168
    169		return;
    170	}
    171
    172	count = of_count_phandle_with_args(np, "required-opps", NULL);
    173	if (count <= 0)
    174		goto put_np;
    175
    176	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
    177				      GFP_KERNEL);
    178	if (!required_opp_tables)
    179		goto put_np;
    180
    181	opp_table->required_opp_tables = required_opp_tables;
    182	opp_table->required_opp_count = count;
    183
    184	for (i = 0; i < count; i++) {
    185		required_np = of_parse_required_opp(np, i);
    186		if (!required_np)
    187			goto free_required_tables;
    188
    189		required_opp_tables[i] = _find_table_of_opp_np(required_np);
    190		of_node_put(required_np);
    191
    192		if (IS_ERR(required_opp_tables[i]))
    193			lazy = true;
    194	}
    195
    196	/* Let's do the linking later on */
    197	if (lazy)
    198		list_add(&opp_table->lazy, &lazy_opp_tables);
    199
    200	goto put_np;
    201
    202free_required_tables:
    203	_opp_table_free_required_tables(opp_table);
    204put_np:
    205	of_node_put(np);
    206}
    207
    208void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
    209			int index)
    210{
    211	struct device_node *np, *opp_np;
    212	u32 val;
    213
    214	/*
    215	 * Only required for backward compatibility with v1 bindings, but isn't
    216	 * harmful for other cases. And so we do it unconditionally.
    217	 */
    218	np = of_node_get(dev->of_node);
    219	if (!np)
    220		return;
    221
    222	if (!of_property_read_u32(np, "clock-latency", &val))
    223		opp_table->clock_latency_ns_max = val;
    224	of_property_read_u32(np, "voltage-tolerance",
    225			     &opp_table->voltage_tolerance_v1);
    226
    227	if (of_find_property(np, "#power-domain-cells", NULL))
    228		opp_table->is_genpd = true;
    229
    230	/* Get OPP table node */
    231	opp_np = _opp_of_get_opp_desc_node(np, index);
    232	of_node_put(np);
    233
    234	if (!opp_np)
    235		return;
    236
    237	if (of_property_read_bool(opp_np, "opp-shared"))
    238		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
    239	else
    240		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
    241
    242	opp_table->np = opp_np;
    243
    244	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
    245	of_node_put(opp_np);
    246}
    247
    248void _of_clear_opp_table(struct opp_table *opp_table)
    249{
    250	_opp_table_free_required_tables(opp_table);
    251}
    252
    253/*
    254 * Release all resources previously acquired with a call to
    255 * _of_opp_alloc_required_opps().
    256 */
    257void _of_opp_free_required_opps(struct opp_table *opp_table,
    258				struct dev_pm_opp *opp)
    259{
    260	struct dev_pm_opp **required_opps = opp->required_opps;
    261	int i;
    262
    263	if (!required_opps)
    264		return;
    265
    266	for (i = 0; i < opp_table->required_opp_count; i++) {
    267		if (!required_opps[i])
    268			continue;
    269
    270		/* Put the reference back */
    271		dev_pm_opp_put(required_opps[i]);
    272	}
    273
    274	opp->required_opps = NULL;
    275	kfree(required_opps);
    276}
    277
    278/* Populate all required OPPs which are part of "required-opps" list */
    279static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
    280				       struct dev_pm_opp *opp)
    281{
    282	struct dev_pm_opp **required_opps;
    283	struct opp_table *required_table;
    284	struct device_node *np;
    285	int i, ret, count = opp_table->required_opp_count;
    286
    287	if (!count)
    288		return 0;
    289
    290	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
    291	if (!required_opps)
    292		return -ENOMEM;
    293
    294	opp->required_opps = required_opps;
    295
    296	for (i = 0; i < count; i++) {
    297		required_table = opp_table->required_opp_tables[i];
    298
    299		/* Required table not added yet, we will link later */
    300		if (IS_ERR_OR_NULL(required_table))
    301			continue;
    302
    303		np = of_parse_required_opp(opp->np, i);
    304		if (unlikely(!np)) {
    305			ret = -ENODEV;
    306			goto free_required_opps;
    307		}
    308
    309		required_opps[i] = _find_opp_of_np(required_table, np);
    310		of_node_put(np);
    311
    312		if (!required_opps[i]) {
    313			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
    314			       __func__, opp->np, i);
    315			ret = -ENODEV;
    316			goto free_required_opps;
    317		}
    318	}
    319
    320	return 0;
    321
    322free_required_opps:
    323	_of_opp_free_required_opps(opp_table, opp);
    324
    325	return ret;
    326}
    327
    328/* Link required OPPs for an individual OPP */
    329static int lazy_link_required_opps(struct opp_table *opp_table,
    330				   struct opp_table *new_table, int index)
    331{
    332	struct device_node *required_np;
    333	struct dev_pm_opp *opp;
    334
    335	list_for_each_entry(opp, &opp_table->opp_list, node) {
    336		required_np = of_parse_required_opp(opp->np, index);
    337		if (unlikely(!required_np))
    338			return -ENODEV;
    339
    340		opp->required_opps[index] = _find_opp_of_np(new_table, required_np);
    341		of_node_put(required_np);
    342
    343		if (!opp->required_opps[index]) {
    344			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
    345			       __func__, opp->np, index);
    346			return -ENODEV;
    347		}
    348	}
    349
    350	return 0;
    351}
    352
    353/* Link required OPPs for all OPPs of the newly added OPP table */
    354static void lazy_link_required_opp_table(struct opp_table *new_table)
    355{
    356	struct opp_table *opp_table, *temp, **required_opp_tables;
    357	struct device_node *required_np, *opp_np, *required_table_np;
    358	struct dev_pm_opp *opp;
    359	int i, ret;
    360
    361	mutex_lock(&opp_table_lock);
    362
    363	list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
    364		bool lazy = false;
    365
    366		/* opp_np can't be invalid here */
    367		opp_np = of_get_next_available_child(opp_table->np, NULL);
    368
    369		for (i = 0; i < opp_table->required_opp_count; i++) {
    370			required_opp_tables = opp_table->required_opp_tables;
    371
    372			/* Required opp-table is already parsed */
    373			if (!IS_ERR(required_opp_tables[i]))
    374				continue;
    375
    376			/* required_np can't be invalid here */
    377			required_np = of_parse_required_opp(opp_np, i);
    378			required_table_np = of_get_parent(required_np);
    379
    380			of_node_put(required_table_np);
    381			of_node_put(required_np);
    382
    383			/*
    384			 * Newly added table isn't the required opp-table for
    385			 * opp_table.
    386			 */
    387			if (required_table_np != new_table->np) {
    388				lazy = true;
    389				continue;
    390			}
    391
    392			required_opp_tables[i] = new_table;
    393			_get_opp_table_kref(new_table);
    394
    395			/* Link OPPs now */
    396			ret = lazy_link_required_opps(opp_table, new_table, i);
    397			if (ret) {
    398				/* The OPPs will be marked unusable */
    399				lazy = false;
    400				break;
    401			}
    402		}
    403
    404		of_node_put(opp_np);
    405
    406		/* All required opp-tables found, remove from lazy list */
    407		if (!lazy) {
    408			list_del_init(&opp_table->lazy);
    409
    410			list_for_each_entry(opp, &opp_table->opp_list, node)
    411				_required_opps_available(opp, opp_table->required_opp_count);
    412		}
    413	}
    414
    415	mutex_unlock(&opp_table_lock);
    416}
    417
    418static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
    419{
    420	struct device_node *np, *opp_np;
    421	struct property *prop;
    422
    423	if (!opp_table) {
    424		np = of_node_get(dev->of_node);
    425		if (!np)
    426			return -ENODEV;
    427
    428		opp_np = _opp_of_get_opp_desc_node(np, 0);
    429		of_node_put(np);
    430	} else {
    431		opp_np = of_node_get(opp_table->np);
    432	}
    433
    434	/* Let's not fail in case we are parsing opp-v1 bindings */
    435	if (!opp_np)
    436		return 0;
    437
    438	/* Checking only first OPP is sufficient */
    439	np = of_get_next_available_child(opp_np, NULL);
    440	of_node_put(opp_np);
    441	if (!np) {
    442		dev_err(dev, "OPP table empty\n");
    443		return -EINVAL;
    444	}
    445
    446	prop = of_find_property(np, "opp-peak-kBps", NULL);
    447	of_node_put(np);
    448
    449	if (!prop || !prop->length)
    450		return 0;
    451
    452	return 1;
    453}
    454
    455int dev_pm_opp_of_find_icc_paths(struct device *dev,
    456				 struct opp_table *opp_table)
    457{
    458	struct device_node *np;
    459	int ret, i, count, num_paths;
    460	struct icc_path **paths;
    461
    462	ret = _bandwidth_supported(dev, opp_table);
    463	if (ret == -EINVAL)
    464		return 0; /* Empty OPP table is a valid corner-case, let's not fail */
    465	else if (ret <= 0)
    466		return ret;
    467
    468	ret = 0;
    469
    470	np = of_node_get(dev->of_node);
    471	if (!np)
    472		return 0;
    473
    474	count = of_count_phandle_with_args(np, "interconnects",
    475					   "#interconnect-cells");
    476	of_node_put(np);
    477	if (count < 0)
    478		return 0;
    479
    480	/* two phandles when #interconnect-cells = <1> */
    481	if (count % 2) {
    482		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
    483		return -EINVAL;
    484	}
    485
    486	num_paths = count / 2;
    487	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
    488	if (!paths)
    489		return -ENOMEM;
    490
    491	for (i = 0; i < num_paths; i++) {
    492		paths[i] = of_icc_get_by_index(dev, i);
    493		if (IS_ERR(paths[i])) {
    494			ret = PTR_ERR(paths[i]);
    495			if (ret != -EPROBE_DEFER) {
    496				dev_err(dev, "%s: Unable to get path%d: %d\n",
    497					__func__, i, ret);
    498			}
    499			goto err;
    500		}
    501	}
    502
    503	if (opp_table) {
    504		opp_table->paths = paths;
    505		opp_table->path_count = num_paths;
    506		return 0;
    507	}
    508
    509err:
    510	while (i--)
    511		icc_put(paths[i]);
    512
    513	kfree(paths);
    514
    515	return ret;
    516}
    517EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
    518
    519static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
    520			      struct device_node *np)
    521{
    522	unsigned int levels = opp_table->supported_hw_count;
    523	int count, versions, ret, i, j;
    524	u32 val;
    525
    526	if (!opp_table->supported_hw) {
    527		/*
    528		 * If the platform hasn't set supported_hw but an OPP does
    529		 * have an opp-supported-hw value, the OPP should not be
    530		 * enabled, as there is no way to tell whether the hardware
    531		 * supports it.
    532		 */
    533		if (of_find_property(np, "opp-supported-hw", NULL))
    534			return false;
    535		else
    536			return true;
    537	}
    538
    539	count = of_property_count_u32_elems(np, "opp-supported-hw");
    540	if (count <= 0 || count % levels) {
    541		dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
    542			__func__, count);
    543		return false;
    544	}
    545
    546	versions = count / levels;
    547
    548	/* All levels in at least one of the versions should match */
    549	for (i = 0; i < versions; i++) {
    550		bool supported = true;
    551
    552		for (j = 0; j < levels; j++) {
    553			ret = of_property_read_u32_index(np, "opp-supported-hw",
    554							 i * levels + j, &val);
    555			if (ret) {
    556				dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
    557					 __func__, i * levels + j, ret);
    558				return false;
    559			}
    560
    561			/* Check if the level is supported */
    562			if (!(val & opp_table->supported_hw[j])) {
    563				supported = false;
    564				break;
    565			}
    566		}
    567
    568		if (supported)
    569			return true;
    570	}
    571
    572	return false;
    573}
    574
    575static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
    576			      struct opp_table *opp_table)
    577{
    578	u32 *microvolt, *microamp = NULL, *microwatt = NULL;
    579	int supplies = opp_table->regulator_count;
    580	int vcount, icount, pcount, ret, i, j;
    581	struct property *prop = NULL;
    582	char name[NAME_MAX];
    583
    584	/* Search for "opp-microvolt-<name>" */
    585	if (opp_table->prop_name) {
    586		snprintf(name, sizeof(name), "opp-microvolt-%s",
    587			 opp_table->prop_name);
    588		prop = of_find_property(opp->np, name, NULL);
    589	}
    590
    591	if (!prop) {
    592		/* Search for "opp-microvolt" */
    593		sprintf(name, "opp-microvolt");
    594		prop = of_find_property(opp->np, name, NULL);
    595
    596		/* Missing property isn't a problem, but an invalid entry is */
    597		if (!prop) {
    598			if (unlikely(supplies == -1)) {
    599				/* Initialize regulator_count */
    600				opp_table->regulator_count = 0;
    601				return 0;
    602			}
    603
    604			if (!supplies)
    605				return 0;
    606
    607			dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
    608				__func__);
    609			return -EINVAL;
    610		}
    611	}
    612
    613	if (unlikely(supplies == -1)) {
    614		/* Initialize regulator_count */
    615		supplies = opp_table->regulator_count = 1;
    616	} else if (unlikely(!supplies)) {
    617		dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
    618		return -EINVAL;
    619	}
    620
    621	vcount = of_property_count_u32_elems(opp->np, name);
    622	if (vcount < 0) {
    623		dev_err(dev, "%s: Invalid %s property (%d)\n",
    624			__func__, name, vcount);
    625		return vcount;
    626	}
    627
    628	/* There can be one or three elements per supply */
    629	if (vcount != supplies && vcount != supplies * 3) {
    630		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
    631			__func__, name, vcount, supplies);
    632		return -EINVAL;
    633	}
    634
    635	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
    636	if (!microvolt)
    637		return -ENOMEM;
    638
    639	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
    640	if (ret) {
    641		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
    642		ret = -EINVAL;
    643		goto free_microvolt;
    644	}
    645
    646	/* Search for "opp-microamp-<name>" */
    647	prop = NULL;
    648	if (opp_table->prop_name) {
    649		snprintf(name, sizeof(name), "opp-microamp-%s",
    650			 opp_table->prop_name);
    651		prop = of_find_property(opp->np, name, NULL);
    652	}
    653
    654	if (!prop) {
    655		/* Search for "opp-microamp" */
    656		sprintf(name, "opp-microamp");
    657		prop = of_find_property(opp->np, name, NULL);
    658	}
    659
    660	if (prop) {
    661		icount = of_property_count_u32_elems(opp->np, name);
    662		if (icount < 0) {
    663			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
    664				name, icount);
    665			ret = icount;
    666			goto free_microvolt;
    667		}
    668
    669		if (icount != supplies) {
    670			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
    671				__func__, name, icount, supplies);
    672			ret = -EINVAL;
    673			goto free_microvolt;
    674		}
    675
    676		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
    677		if (!microamp) {
    678			ret = -ENOMEM;
    679			goto free_microvolt;
    680		}
    681
    682		ret = of_property_read_u32_array(opp->np, name, microamp,
    683						 icount);
    684		if (ret) {
    685			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
    686				name, ret);
    687			ret = -EINVAL;
    688			goto free_microamp;
    689		}
    690	}
    691
    692	/* Search for "opp-microwatt" */
    693	sprintf(name, "opp-microwatt");
    694	prop = of_find_property(opp->np, name, NULL);
    695
    696	if (prop) {
    697		pcount = of_property_count_u32_elems(opp->np, name);
    698		if (pcount < 0) {
    699			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
    700				name, pcount);
    701			ret = pcount;
    702			goto free_microamp;
    703		}
    704
    705		if (pcount != supplies) {
    706			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
    707				__func__, name, pcount, supplies);
    708			ret = -EINVAL;
    709			goto free_microamp;
    710		}
    711
    712		microwatt = kmalloc_array(pcount, sizeof(*microwatt),
    713					  GFP_KERNEL);
    714		if (!microwatt) {
    715			ret = -ENOMEM;
    716			goto free_microamp;
    717		}
    718
    719		ret = of_property_read_u32_array(opp->np, name, microwatt,
    720						 pcount);
    721		if (ret) {
    722			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
    723				name, ret);
    724			ret = -EINVAL;
    725			goto free_microwatt;
    726		}
    727	}
    728
    729	for (i = 0, j = 0; i < supplies; i++) {
    730		opp->supplies[i].u_volt = microvolt[j++];
    731
    732		if (vcount == supplies) {
    733			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
    734			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
    735		} else {
    736			opp->supplies[i].u_volt_min = microvolt[j++];
    737			opp->supplies[i].u_volt_max = microvolt[j++];
    738		}
    739
    740		if (microamp)
    741			opp->supplies[i].u_amp = microamp[i];
    742
    743		if (microwatt)
    744			opp->supplies[i].u_watt = microwatt[i];
    745	}
    746
    747free_microwatt:
    748	kfree(microwatt);
    749free_microamp:
    750	kfree(microamp);
    751free_microvolt:
    752	kfree(microvolt);
    753
    754	return ret;
    755}
    756
    757/**
    758 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
    759 *				  entries
    760 * @dev:	device pointer used to lookup OPP table.
    761 *
    762 * Free OPPs created using static entries present in DT.
    763 */
    764void dev_pm_opp_of_remove_table(struct device *dev)
    765{
    766	dev_pm_opp_remove_table(dev);
    767}
    768EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
    769
    770static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
    771		    struct device_node *np, bool peak)
    772{
    773	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
    774	struct property *prop;
    775	int i, count, ret;
    776	u32 *bw;
    777
    778	prop = of_find_property(np, name, NULL);
    779	if (!prop)
    780		return -ENODEV;
    781
    782	count = prop->length / sizeof(u32);
    783	if (table->path_count != count) {
    784		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
    785				__func__, name, count, table->path_count);
    786		return -EINVAL;
    787	}
    788
    789	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
    790	if (!bw)
    791		return -ENOMEM;
    792
    793	ret = of_property_read_u32_array(np, name, bw, count);
    794	if (ret) {
    795		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
    796		goto out;
    797	}
    798
    799	for (i = 0; i < count; i++) {
    800		if (peak)
    801			new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
    802		else
    803			new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
    804	}
    805
    806out:
    807	kfree(bw);
    808	return ret;
    809}
    810
    811static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
    812			 struct device_node *np, bool *rate_not_available)
    813{
    814	bool found = false;
    815	u64 rate;
    816	int ret;
    817
    818	ret = of_property_read_u64(np, "opp-hz", &rate);
    819	if (!ret) {
    820		/*
    821		 * Rate is defined as an unsigned long in clk API, and so
    822		 * casting explicitly to its type. Must be fixed once rate is 64
    823		 * bit guaranteed in clk API.
    824		 */
    825		new_opp->rate = (unsigned long)rate;
    826		found = true;
    827	}
    828	*rate_not_available = !!ret;
    829
    830	/*
    831	 * Bandwidth consists of peak and average (optional) values:
    832	 * opp-peak-kBps = <path1_value path2_value>;
    833	 * opp-avg-kBps = <path1_value path2_value>;
    834	 */
    835	ret = _read_bw(new_opp, table, np, true);
    836	if (!ret) {
    837		found = true;
    838		ret = _read_bw(new_opp, table, np, false);
    839	}
    840
    841	/* The properties were found but we failed to parse them */
    842	if (ret && ret != -ENODEV)
    843		return ret;
    844
    845	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
    846		found = true;
    847
    848	if (found)
    849		return 0;
    850
    851	return ret;
    852}
    853
    854/**
    855 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
    856 * @opp_table:	OPP table
    857 * @dev:	device for which we do this operation
    858 * @np:		device node
    859 *
    860 * This function adds an opp definition to the opp table and returns status. The
    861 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
    862 * removed by dev_pm_opp_remove.
    863 *
    864 * Return:
    865 * Valid OPP pointer:
    866 *		On success
    867 * NULL:
    868 *		Duplicate OPPs (both freq and volt are same) and opp->available
    869 *		OR if the OPP is not supported by hardware.
    870 * ERR_PTR(-EEXIST):
    871 *		Freq are same and volt are different OR
    872 *		Duplicate OPPs (both freq and volt are same) and !opp->available
    873 * ERR_PTR(-ENOMEM):
    874 *		Memory allocation failure
    875 * ERR_PTR(-EINVAL):
    876 *		Failed parsing the OPP node
    877 */
    878static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
    879		struct device *dev, struct device_node *np)
    880{
    881	struct dev_pm_opp *new_opp;
    882	u32 val;
    883	int ret;
    884	bool rate_not_available = false;
    885
    886	new_opp = _opp_allocate(opp_table);
    887	if (!new_opp)
    888		return ERR_PTR(-ENOMEM);
    889
    890	ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
    891	if (ret < 0) {
    892		dev_err(dev, "%s: opp key field not found\n", __func__);
    893		goto free_opp;
    894	}
    895
    896	/* Check if the OPP supports hardware's hierarchy of versions or not */
    897	if (!_opp_is_supported(dev, opp_table, np)) {
    898		dev_dbg(dev, "OPP not supported by hardware: %lu\n",
    899			new_opp->rate);
    900		goto free_opp;
    901	}
    902
    903	new_opp->turbo = of_property_read_bool(np, "turbo-mode");
    904
    905	new_opp->np = np;
    906	new_opp->dynamic = false;
    907	new_opp->available = true;
    908
    909	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
    910	if (ret)
    911		goto free_opp;
    912
    913	if (!of_property_read_u32(np, "clock-latency-ns", &val))
    914		new_opp->clock_latency_ns = val;
    915
    916	ret = opp_parse_supplies(new_opp, dev, opp_table);
    917	if (ret)
    918		goto free_required_opps;
    919
    920	if (opp_table->is_genpd)
    921		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
    922
    923	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
    924	if (ret) {
    925		/* Don't return error for duplicate OPPs */
    926		if (ret == -EBUSY)
    927			ret = 0;
    928		goto free_required_opps;
    929	}
    930
    931	/* OPP to select on device suspend */
    932	if (of_property_read_bool(np, "opp-suspend")) {
    933		if (opp_table->suspend_opp) {
    934			/* Pick the OPP with higher rate as suspend OPP */
    935			if (new_opp->rate > opp_table->suspend_opp->rate) {
    936				opp_table->suspend_opp->suspend = false;
    937				new_opp->suspend = true;
    938				opp_table->suspend_opp = new_opp;
    939			}
    940		} else {
    941			new_opp->suspend = true;
    942			opp_table->suspend_opp = new_opp;
    943		}
    944	}
    945
    946	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
    947		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
    948
    949	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
    950		 __func__, new_opp->turbo, new_opp->rate,
    951		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
    952		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
    953		 new_opp->level);
    954
    955	/*
    956	 * Notify the changes in the availability of the operable
    957	 * frequency/voltage list.
    958	 */
    959	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
    960	return new_opp;
    961
    962free_required_opps:
    963	_of_opp_free_required_opps(opp_table, new_opp);
    964free_opp:
    965	_opp_free(new_opp);
    966
    967	return ret ? ERR_PTR(ret) : NULL;
    968}
    969
    970/* Initializes OPP tables based on new bindings */
    971static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
    972{
    973	struct device_node *np;
    974	int ret, count = 0;
    975	struct dev_pm_opp *opp;
    976
    977	/* OPP table is already initialized for the device */
    978	mutex_lock(&opp_table->lock);
    979	if (opp_table->parsed_static_opps) {
    980		opp_table->parsed_static_opps++;
    981		mutex_unlock(&opp_table->lock);
    982		return 0;
    983	}
    984
    985	opp_table->parsed_static_opps = 1;
    986	mutex_unlock(&opp_table->lock);
    987
    988	/* We have opp-table node now, iterate over it and add OPPs */
    989	for_each_available_child_of_node(opp_table->np, np) {
    990		opp = _opp_add_static_v2(opp_table, dev, np);
    991		if (IS_ERR(opp)) {
    992			ret = PTR_ERR(opp);
    993			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
    994				ret);
    995			of_node_put(np);
    996			goto remove_static_opp;
    997		} else if (opp) {
    998			count++;
    999		}
   1000	}
   1001
   1002	/* There should be one or more OPPs defined */
   1003	if (!count) {
   1004		dev_err(dev, "%s: no supported OPPs", __func__);
   1005		ret = -ENOENT;
   1006		goto remove_static_opp;
   1007	}
   1008
   1009	list_for_each_entry(opp, &opp_table->opp_list, node) {
   1010		/* Any non-zero performance state would enable the feature */
   1011		if (opp->pstate) {
   1012			opp_table->genpd_performance_state = true;
   1013			break;
   1014		}
   1015	}
   1016
   1017	lazy_link_required_opp_table(opp_table);
   1018
   1019	return 0;
   1020
   1021remove_static_opp:
   1022	_opp_remove_all_static(opp_table);
   1023
   1024	return ret;
   1025}
   1026
   1027/* Initializes OPP tables based on old-deprecated bindings */
   1028static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
   1029{
   1030	const struct property *prop;
   1031	const __be32 *val;
   1032	int nr, ret = 0;
   1033
   1034	mutex_lock(&opp_table->lock);
   1035	if (opp_table->parsed_static_opps) {
   1036		opp_table->parsed_static_opps++;
   1037		mutex_unlock(&opp_table->lock);
   1038		return 0;
   1039	}
   1040
   1041	opp_table->parsed_static_opps = 1;
   1042	mutex_unlock(&opp_table->lock);
   1043
   1044	prop = of_find_property(dev->of_node, "operating-points", NULL);
   1045	if (!prop) {
   1046		ret = -ENODEV;
   1047		goto remove_static_opp;
   1048	}
   1049	if (!prop->value) {
   1050		ret = -ENODATA;
   1051		goto remove_static_opp;
   1052	}
   1053
   1054	/*
   1055	 * Each OPP is a tuple consisting of a frequency and a voltage,
   1056	 * like <freq-kHz vol-uV>.
   1057	 */
   1058	nr = prop->length / sizeof(u32);
   1059	if (nr % 2) {
   1060		dev_err(dev, "%s: Invalid OPP table\n", __func__);
   1061		ret = -EINVAL;
   1062		goto remove_static_opp;
   1063	}
   1064
   1065	val = prop->value;
   1066	while (nr) {
   1067		unsigned long freq = be32_to_cpup(val++) * 1000;
   1068		unsigned long volt = be32_to_cpup(val++);
   1069
   1070		ret = _opp_add_v1(opp_table, dev, freq, volt, false);
   1071		if (ret) {
   1072			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
   1073				__func__, freq, ret);
   1074			goto remove_static_opp;
   1075		}
   1076		nr -= 2;
   1077	}
   1078
   1079	return 0;
   1080
   1081remove_static_opp:
   1082	_opp_remove_all_static(opp_table);
   1083
   1084	return ret;
   1085}
   1086
   1087static int _of_add_table_indexed(struct device *dev, int index, bool getclk)
   1088{
   1089	struct opp_table *opp_table;
   1090	int ret, count;
   1091
   1092	if (index) {
   1093		/*
   1094		 * If only one phandle is present, then the same OPP table
   1095		 * applies for all index requests.
   1096		 */
   1097		count = of_count_phandle_with_args(dev->of_node,
   1098						   "operating-points-v2", NULL);
   1099		if (count == 1)
   1100			index = 0;
   1101	}
   1102
   1103	opp_table = _add_opp_table_indexed(dev, index, getclk);
   1104	if (IS_ERR(opp_table))
   1105		return PTR_ERR(opp_table);
   1106
   1107	/*
   1108	 * OPPs now have two versions of bindings. Also try the old (v1)
   1109	 * bindings for backward compatibility with older dtbs.
   1110	 */
   1111	if (opp_table->np)
   1112		ret = _of_add_opp_table_v2(dev, opp_table);
   1113	else
   1114		ret = _of_add_opp_table_v1(dev, opp_table);
   1115
   1116	if (ret)
   1117		dev_pm_opp_put_opp_table(opp_table);
   1118
   1119	return ret;
   1120}
   1121
   1122static void devm_pm_opp_of_table_release(void *data)
   1123{
   1124	dev_pm_opp_of_remove_table(data);
   1125}
   1126
   1127static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk)
   1128{
   1129	int ret;
   1130
   1131	ret = _of_add_table_indexed(dev, index, getclk);
   1132	if (ret)
   1133		return ret;
   1134
   1135	return devm_add_action_or_reset(dev, devm_pm_opp_of_table_release, dev);
   1136}
   1137
   1138/**
   1139 * devm_pm_opp_of_add_table() - Initialize opp table from device tree
   1140 * @dev:	device pointer used to lookup OPP table.
   1141 *
   1142 * Register the initial OPP table with the OPP library for given device.
   1143 *
   1144 * The opp_table structure will be freed after the device is destroyed.
   1145 *
   1146 * Return:
   1147 * 0		On success OR
   1148 *		Duplicate OPPs (both freq and volt are same) and opp->available
   1149 * -EEXIST	Freq are same and volt are different OR
   1150 *		Duplicate OPPs (both freq and volt are same) and !opp->available
   1151 * -ENOMEM	Memory allocation failure
   1152 * -ENODEV	when 'operating-points' property is not found or is invalid data
   1153 *		in device node.
   1154 * -ENODATA	when empty 'operating-points' property is found
   1155 * -EINVAL	when invalid entries are found in opp-v2 table
   1156 */
   1157int devm_pm_opp_of_add_table(struct device *dev)
   1158{
   1159	return _devm_of_add_table_indexed(dev, 0, true);
   1160}
   1161EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
   1162
   1163/**
   1164 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
   1165 * @dev:	device pointer used to lookup OPP table.
   1166 *
   1167 * Register the initial OPP table with the OPP library for given device.
   1168 *
   1169 * Return:
   1170 * 0		On success OR
   1171 *		Duplicate OPPs (both freq and volt are same) and opp->available
   1172 * -EEXIST	Freq are same and volt are different OR
   1173 *		Duplicate OPPs (both freq and volt are same) and !opp->available
   1174 * -ENOMEM	Memory allocation failure
   1175 * -ENODEV	when 'operating-points' property is not found or is invalid data
   1176 *		in device node.
   1177 * -ENODATA	when empty 'operating-points' property is found
   1178 * -EINVAL	when invalid entries are found in opp-v2 table
   1179 */
   1180int dev_pm_opp_of_add_table(struct device *dev)
   1181{
   1182	return _of_add_table_indexed(dev, 0, true);
   1183}
   1184EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
   1185
   1186/**
   1187 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
   1188 * @dev:	device pointer used to lookup OPP table.
   1189 * @index:	Index number.
   1190 *
   1191 * Register the initial OPP table with the OPP library for given device only
   1192 * using the "operating-points-v2" property.
   1193 *
   1194 * Return: Refer to dev_pm_opp_of_add_table() for return values.
   1195 */
   1196int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
   1197{
   1198	return _of_add_table_indexed(dev, index, true);
   1199}
   1200EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
   1201
   1202/**
   1203 * devm_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
   1204 * @dev:	device pointer used to lookup OPP table.
   1205 * @index:	Index number.
   1206 *
   1207 * This is a resource-managed variant of dev_pm_opp_of_add_table_indexed().
   1208 */
   1209int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
   1210{
   1211	return _devm_of_add_table_indexed(dev, index, true);
   1212}
   1213EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
   1214
   1215/**
   1216 * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
   1217 *		tree without getting clk for device.
   1218 * @dev:	device pointer used to lookup OPP table.
   1219 * @index:	Index number.
   1220 *
   1221 * Register the initial OPP table with the OPP library for given device only
   1222 * using the "operating-points-v2" property. Do not try to get the clk for the
   1223 * device.
   1224 *
   1225 * Return: Refer to dev_pm_opp_of_add_table() for return values.
   1226 */
   1227int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
   1228{
   1229	return _of_add_table_indexed(dev, index, false);
   1230}
   1231EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
   1232
   1233/**
   1234 * devm_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
   1235 *		tree without getting clk for device.
   1236 * @dev:	device pointer used to lookup OPP table.
   1237 * @index:	Index number.
   1238 *
   1239 * This is a resource-managed variant of dev_pm_opp_of_add_table_noclk().
   1240 */
   1241int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
   1242{
   1243	return _devm_of_add_table_indexed(dev, index, false);
   1244}
   1245EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_noclk);
   1246
   1247/* CPU device specific helpers */
   1248
   1249/**
   1250 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
   1251 * @cpumask:	cpumask for which OPP table needs to be removed
   1252 *
   1253 * This removes the OPP tables for CPUs present in the @cpumask.
   1254 * This should be used only to remove static entries created from DT.
   1255 */
   1256void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
   1257{
   1258	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
   1259}
   1260EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
   1261
   1262/**
   1263 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
   1264 * @cpumask:	cpumask for which OPP table needs to be added.
   1265 *
   1266 * This adds the OPP tables for CPUs present in the @cpumask.
   1267 */
   1268int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
   1269{
   1270	struct device *cpu_dev;
   1271	int cpu, ret;
   1272
   1273	if (WARN_ON(cpumask_empty(cpumask)))
   1274		return -ENODEV;
   1275
   1276	for_each_cpu(cpu, cpumask) {
   1277		cpu_dev = get_cpu_device(cpu);
   1278		if (!cpu_dev) {
   1279			pr_err("%s: failed to get cpu%d device\n", __func__,
   1280			       cpu);
   1281			ret = -ENODEV;
   1282			goto remove_table;
   1283		}
   1284
   1285		ret = dev_pm_opp_of_add_table(cpu_dev);
   1286		if (ret) {
   1287			/*
   1288			 * OPP may get registered dynamically, don't print error
   1289			 * message here.
   1290			 */
   1291			pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
   1292				 __func__, cpu, ret);
   1293
   1294			goto remove_table;
   1295		}
   1296	}
   1297
   1298	return 0;
   1299
   1300remove_table:
   1301	/* Free all other OPPs */
   1302	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);
   1303
   1304	return ret;
   1305}
   1306EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
   1307
   1308/*
   1309 * Works only for OPP v2 bindings.
   1310 *
   1311 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
   1312 */
   1313/**
   1314 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
   1315 *				      @cpu_dev using operating-points-v2
   1316 *				      bindings.
   1317 *
   1318 * @cpu_dev:	CPU device for which we do this operation
   1319 * @cpumask:	cpumask to update with information of sharing CPUs
   1320 *
   1321 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
   1322 *
   1323 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
   1324 */
   1325int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
   1326				   struct cpumask *cpumask)
   1327{
   1328	struct device_node *np, *tmp_np, *cpu_np;
   1329	int cpu, ret = 0;
   1330
   1331	/* Get OPP descriptor node */
   1332	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
   1333	if (!np) {
   1334		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
   1335		return -ENOENT;
   1336	}
   1337
   1338	cpumask_set_cpu(cpu_dev->id, cpumask);
   1339
   1340	/* Are the OPPs shared? */
   1341	if (!of_property_read_bool(np, "opp-shared"))
   1342		goto put_cpu_node;
   1343
   1344	for_each_possible_cpu(cpu) {
   1345		if (cpu == cpu_dev->id)
   1346			continue;
   1347
   1348		cpu_np = of_cpu_device_node_get(cpu);
   1349		if (!cpu_np) {
   1350			dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
   1351				__func__, cpu);
   1352			ret = -ENOENT;
   1353			goto put_cpu_node;
   1354		}
   1355
   1356		/* Get OPP descriptor node */
   1357		tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
   1358		of_node_put(cpu_np);
   1359		if (!tmp_np) {
   1360			pr_err("%pOF: Couldn't find opp node\n", cpu_np);
   1361			ret = -ENOENT;
   1362			goto put_cpu_node;
   1363		}
   1364
   1365		/* CPUs are sharing opp node */
   1366		if (np == tmp_np)
   1367			cpumask_set_cpu(cpu, cpumask);
   1368
   1369		of_node_put(tmp_np);
   1370	}
   1371
   1372put_cpu_node:
   1373	of_node_put(np);
   1374	return ret;
   1375}
   1376EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
   1377
   1378/**
   1379 * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
   1380 * @np: Node that contains the "required-opps" property.
   1381 * @index: Index of the phandle to parse.
   1382 *
   1383 * Returns the performance state of the OPP pointed out by the "required-opps"
   1384 * property at @index in @np.
   1385 *
   1386 * Return: Zero or positive performance state on success, otherwise negative
   1387 * value on errors.
   1388 */
   1389int of_get_required_opp_performance_state(struct device_node *np, int index)
   1390{
   1391	struct dev_pm_opp *opp;
   1392	struct device_node *required_np;
   1393	struct opp_table *opp_table;
   1394	int pstate = -EINVAL;
   1395
   1396	required_np = of_parse_required_opp(np, index);
   1397	if (!required_np)
   1398		return -ENODEV;
   1399
   1400	opp_table = _find_table_of_opp_np(required_np);
   1401	if (IS_ERR(opp_table)) {
   1402		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
   1403		       __func__, np, PTR_ERR(opp_table));
   1404		goto put_required_np;
   1405	}
   1406
   1407	opp = _find_opp_of_np(opp_table, required_np);
   1408	if (opp) {
   1409		pstate = opp->pstate;
   1410		dev_pm_opp_put(opp);
   1411	}
   1412
   1413	dev_pm_opp_put_opp_table(opp_table);
   1414
   1415put_required_np:
   1416	of_node_put(required_np);
   1417
   1418	return pstate;
   1419}
   1420EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
   1421
   1422/**
   1423 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
   1424 * @opp:	opp for which DT node has to be returned for
   1425 *
   1426 * Return: DT node corresponding to the opp on success, NULL otherwise.
   1427 *
   1428 * The caller needs to put the node with of_node_put() after using it.
   1429 */
   1430struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
   1431{
   1432	if (IS_ERR_OR_NULL(opp)) {
   1433		pr_err("%s: Invalid parameters\n", __func__);
   1434		return NULL;
   1435	}
   1436
   1437	return of_node_get(opp->np);
   1438}
   1439EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
   1440
   1441/*
   1442 * Callback function provided to the Energy Model framework upon registration.
   1443 * It provides the power used by @dev at @kHz if it is the frequency of an
   1444 * existing OPP, or at the frequency of the first OPP above @kHz otherwise
   1445 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
   1446 * frequency and @mW to the associated power.
   1447 *
   1448 * Returns 0 on success or a proper -EINVAL value in case of error.
   1449 */
   1450static int __maybe_unused
   1451_get_dt_power(struct device *dev, unsigned long *mW, unsigned long *kHz)
   1452{
   1453	struct dev_pm_opp *opp;
   1454	unsigned long opp_freq, opp_power;
   1455
   1456	/* Find the right frequency and related OPP */
   1457	opp_freq = *kHz * 1000;
   1458	opp = dev_pm_opp_find_freq_ceil(dev, &opp_freq);
   1459	if (IS_ERR(opp))
   1460		return -EINVAL;
   1461
   1462	opp_power = dev_pm_opp_get_power(opp);
   1463	dev_pm_opp_put(opp);
   1464	if (!opp_power)
   1465		return -EINVAL;
   1466
   1467	*kHz = opp_freq / 1000;
   1468	*mW = opp_power / 1000;
   1469
   1470	return 0;
   1471}
   1472
   1473/*
   1474 * Callback function provided to the Energy Model framework upon registration.
   1475 * This computes the power estimated by @dev at @kHz if it is the frequency
   1476 * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
   1477 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
   1478 * frequency and @mW to the associated power. The power is estimated as
   1479 * P = C * V^2 * f with C being the device's capacitance and V and f
   1480 * respectively the voltage and frequency of the OPP.
   1481 *
   1482 * Returns -EINVAL if the power calculation failed because of missing
   1483 * parameters, 0 otherwise.
   1484 */
   1485static int __maybe_unused _get_power(struct device *dev, unsigned long *mW,
   1486				     unsigned long *kHz)
   1487{
   1488	struct dev_pm_opp *opp;
   1489	struct device_node *np;
   1490	unsigned long mV, Hz;
   1491	u32 cap;
   1492	u64 tmp;
   1493	int ret;
   1494
   1495	np = of_node_get(dev->of_node);
   1496	if (!np)
   1497		return -EINVAL;
   1498
   1499	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
   1500	of_node_put(np);
   1501	if (ret)
   1502		return -EINVAL;
   1503
   1504	Hz = *kHz * 1000;
   1505	opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
   1506	if (IS_ERR(opp))
   1507		return -EINVAL;
   1508
   1509	mV = dev_pm_opp_get_voltage(opp) / 1000;
   1510	dev_pm_opp_put(opp);
   1511	if (!mV)
   1512		return -EINVAL;
   1513
   1514	tmp = (u64)cap * mV * mV * (Hz / 1000000);
   1515	do_div(tmp, 1000000000);
   1516
   1517	*mW = (unsigned long)tmp;
   1518	*kHz = Hz / 1000;
   1519
   1520	return 0;
   1521}
   1522
   1523static bool _of_has_opp_microwatt_property(struct device *dev)
   1524{
   1525	unsigned long power, freq = 0;
   1526	struct dev_pm_opp *opp;
   1527
   1528	/* Check if at least one OPP has needed property */
   1529	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
   1530	if (IS_ERR(opp))
   1531		return false;
   1532
   1533	power = dev_pm_opp_get_power(opp);
   1534	dev_pm_opp_put(opp);
   1535	if (!power)
   1536		return false;
   1537
   1538	return true;
   1539}
   1540
   1541/**
   1542 * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
   1543 * @dev		: Device for which an Energy Model has to be registered
   1544 * @cpus	: CPUs for which an Energy Model has to be registered. For
   1545 *		other type of devices it should be set to NULL.
   1546 *
   1547 * This checks whether the "dynamic-power-coefficient" devicetree property has
   1548 * been specified, and tries to register an Energy Model with it if it has.
   1549 * Having this property means the voltages are known for OPPs and the EM
   1550 * might be calculated.
   1551 */
   1552int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
   1553{
   1554	struct em_data_callback em_cb;
   1555	struct device_node *np;
   1556	int ret, nr_opp;
   1557	u32 cap;
   1558
   1559	if (IS_ERR_OR_NULL(dev)) {
   1560		ret = -EINVAL;
   1561		goto failed;
   1562	}
   1563
   1564	nr_opp = dev_pm_opp_get_opp_count(dev);
   1565	if (nr_opp <= 0) {
   1566		ret = -EINVAL;
   1567		goto failed;
   1568	}
   1569
   1570	/* First, try to find a more precise Energy Model in DT */
   1571	if (_of_has_opp_microwatt_property(dev)) {
   1572		EM_SET_ACTIVE_POWER_CB(em_cb, _get_dt_power);
   1573		goto register_em;
   1574	}
   1575
   1576	np = of_node_get(dev->of_node);
   1577	if (!np) {
   1578		ret = -EINVAL;
   1579		goto failed;
   1580	}
   1581
   1582	/*
   1583	 * Register an EM only if the 'dynamic-power-coefficient' property is
   1584	 * set in devicetree. It is assumed the voltage values are known if that
   1585	 * property is set since it is useless otherwise. If voltages are not
   1586	 * known, just let the EM registration fail with an error to alert the
   1587	 * user about the inconsistent configuration.
   1588	 */
   1589	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
   1590	of_node_put(np);
   1591	if (ret || !cap) {
   1592		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
   1593		ret = -EINVAL;
   1594		goto failed;
   1595	}
   1596
   1597	EM_SET_ACTIVE_POWER_CB(em_cb, _get_power);
   1598
   1599register_em:
   1600	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
   1601	if (ret)
   1602		goto failed;
   1603
   1604	return 0;
   1605
   1606failed:
   1607	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
   1608	return ret;
   1609}
   1610EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
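
Illustrative sketch (not from of.c itself): a minimal platform driver showing how the exported helpers above are typically consumed, assuming a hypothetical "vendor,foo" device whose node carries an "operating-points-v2" table. Only exported APIs documented in this file are used; the driver and compatible names are made up.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Parse the static OPPs from DT ("operating-points-v2", or the legacy
	 * "operating-points" property); the devm variant removes the table
	 * automatically when the device is unbound.
	 */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "failed to add OPP table\n");

	/*
	 * Optionally register an Energy Model; this only succeeds when the DT
	 * provides "opp-microwatt" values or a "dynamic-power-coefficient".
	 */
	dev_pm_opp_of_register_em(&pdev->dev, NULL);

	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },		/* hypothetical compatible */
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo-opp-example",
		.of_match_table = foo_of_match,
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Example consumer of the OPP OF helpers");
MODULE_LICENSE("GPL");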