cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (82144B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* OPP tables with uninitialized required OPPs */
LIST_HEAD(lazy_opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;
	bool found = false;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev) {
			found = true;
			break;
		}

	mutex_unlock(&opp_table->lock);
	return found;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table)) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev:	device pointer used to lookup OPP table
 *
 * Search the list of OPP tables for the one containing the matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table_lock);
	opp_table = _find_opp_table_unlocked(dev);
	mutex_unlock(&opp_table_lock);

	return opp_table;
}

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return: voltage in microvolts corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp:	opp for which the power has to be returned
 *
 * Return: power in microwatts corresponding to the opp, else
 * return 0
 *
 * The returned power is summed over all power supplies used by the device.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long opp_power = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}
	for (i = 0; i < opp->opp_table->regulator_count; i++)
		opp_power += opp->supplies[i].u_watt;

	return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return: frequency in hertz corresponding to the opp, else
 * return 0
 */
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
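
/*
 * Illustrative sketch (not part of the original file): reading the basic
 * properties of an OPP a driver already holds a reference to. The helper
 * name "my_driver_show_opp" is hypothetical.
 */
static void my_driver_show_opp(struct device *dev, struct dev_pm_opp *opp)
{
	dev_info(dev, "OPP: %lu Hz, %lu uV\n",
		 dev_pm_opp_get_freq(opp), dev_pm_opp_get_voltage(opp));
}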

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp:	opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *                                    corresponding to an available opp
 * @opp:	opp for which the performance state has to be returned
 * @index:	index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	return opp->required_opps[index]->pstate;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

/**
 * dev_pm_opp_is_turbo() - Returns whether the opp is a turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long clock_latency_ns;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	clock_latency_ns = opp_table->clock_latency_ns_max;

	dev_pm_opp_put_opp_table(opp_table);

	return clock_latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		goto put_opp_table;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		goto put_opp_table;

	mutex_lock(&opp_table->lock);

	for (i = 0; i < count; i++) {
		uV[i].min = ~0;
		uV[i].max = 0;

		list_for_each_entry(opp, &opp_table->opp_list, node) {
			if (!opp->available)
				continue;

			if (opp->supplies[i].u_volt_min < uV[i].min)
				uV[i].min = opp->supplies[i].u_volt_min;
			if (opp->supplies[i].u_volt_max > uV[i].max)
				uV[i].max = opp->supplies[i].u_volt_max;
		}
	}

	mutex_unlock(&opp_table->lock);

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
		dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
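
/*
 * Illustrative sketch (not part of the original file): a cpufreq-style driver
 * seeding its policy's transition latency from the OPP core, the way
 * cpufreq-dt style drivers typically do. The function name is hypothetical
 * and this assumes <linux/cpufreq.h> for struct cpufreq_policy and
 * CPUFREQ_ETERNAL.
 */
static void my_driver_init_latency(struct device *cpu_dev,
				   struct cpufreq_policy *policy)
{
	/* Sum of worst-case clock and voltage switch times, in ns */
	unsigned long latency = dev_pm_opp_get_max_transition_latency(cpu_dev);

	policy->cpuinfo.transition_latency = latency ?: CPUFREQ_ETERNAL;
}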

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table;
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	dev_pm_opp_put_opp_table(opp_table);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	mutex_unlock(&opp_table->lock);

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev:	device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * 0 if none, or a corresponding error value otherwise.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table;
	int count;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		count = PTR_ERR(opp_table);
		dev_dbg(dev, "%s: OPP table not found (%d)\n",
			__func__, count);
		return count;
	}

	count = _get_opp_count(opp_table);
	dev_pm_opp_put_opp_table(opp_table);

	return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
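
/*
 * Illustrative sketch (not part of the original file): sizing a frequency
 * table from the OPP count. Names are hypothetical; note the count is only a
 * snapshot, since OPPs may be added or removed concurrently.
 */
static int my_driver_alloc_freq_table(struct device *dev,
				      unsigned long **freqs, int *num)
{
	int count = dev_pm_opp_get_opp_count(dev);

	if (count <= 0)
		return count ? count : -ENODEV;

	*freqs = kcalloc(count, sizeof(**freqs), GFP_KERNEL);
	if (!*freqs)
		return -ENOMEM;

	*num = count;
	return 0;
}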

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: 'available' is a modifier for the search. If available=true, the
 * match must be for an exact frequency which is available in the stored OPP
 * table; if false, for an exact frequency which is not available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq,
					      bool available)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
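
/*
 * Illustrative sketch (not part of the original file): using the
 * available=false modifier to check whether an exact frequency exists in the
 * table but is currently disabled. The helper name is hypothetical.
 */
static bool my_driver_opp_is_disabled(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;

	/* Match only OPPs that are present yet not available */
	opp = dev_pm_opp_find_freq_exact(dev, freq, false);
	if (IS_ERR(opp))
		return false;

	/* Drop the reference taken by the find helper */
	dev_pm_opp_put(opp);
	return true;
}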

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	return opp;
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *opp;

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	opp = _find_freq_ceil(opp_table, freq);

	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
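
/*
 * Illustrative sketch (not part of the original file): the canonical pattern
 * for walking every available OPP in ascending order with the ceil helper.
 * The function name is hypothetical.
 */
static void my_driver_walk_opps(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
		/* freq was refreshed to the matched OPP's exact rate */
		dev_info(dev, "available OPP: %lu Hz\n", freq);

		/* Drop the reference taken by the find helper */
		dev_pm_opp_put(opp);

		/* Step past the current OPP for the next ceil lookup */
		freq++;
	}
}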

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
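
/*
 * Illustrative sketch (not part of the original file): finding the highest
 * available frequency by taking the floor of ULONG_MAX. The helper name is
 * hypothetical.
 */
static unsigned long my_driver_max_freq(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = ULONG_MAX;

	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return 0;

	/* Drop the reference taken by the find helper */
	dev_pm_opp_put(opp);

	return freq;	/* highest available rate in Hz */
}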

/**
 * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
 *					 target voltage.
 * @dev:	Device for which we do this operation.
 * @u_volt:	Target voltage.
 *
 * Search for the OPP with the highest (ceil) frequency whose voltage is
 * <= u_volt.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error which should be
 * handled using IS_ERR.
 *
 * Error return values can be:
 * EINVAL:	bad parameters
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
						     unsigned long u_volt)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !u_volt) {
		dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
			u_volt);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available) {
			if (temp_opp->supplies[0].u_volt > u_volt)
				break;
			opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
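
/*
 * Illustrative sketch (not part of the original file): picking the fastest
 * OPP that still fits under a voltage cap, e.g. for thermal or power capping.
 * The helper name is hypothetical.
 */
static unsigned long my_driver_freq_for_volt(struct device *dev,
					     unsigned long u_volt)
{
	struct dev_pm_opp *opp;
	unsigned long freq;

	opp = dev_pm_opp_find_freq_ceil_by_volt(dev, u_volt);
	if (IS_ERR(opp))
		return 0;

	freq = dev_pm_opp_get_freq(opp);

	/* Drop the reference taken by the find helper */
	dev_pm_opp_put(opp);

	return freq;
}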

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for exact match in the opp table and returns pointer to the
 * matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->level == level) {
			opp = temp_opp;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev:		device for which we do this operation
 * @level:		level to search for
 *
 * Return: Searches for a rounded-up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int r = PTR_ERR(opp_table);

		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->level >= *level) {
			opp = temp_opp;
			*level = opp->level;

			/* Increment the reference count of OPP */
			dev_pm_opp_get(opp);
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
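
/*
 * Illustrative sketch (not part of the original file): resolving a minimum
 * required level to the table's nearest supported level, as a consumer of a
 * power-domain OPP table might. The helper name is hypothetical.
 */
static int my_driver_round_up_level(struct device *genpd_dev,
				    unsigned int *level)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_level_ceil(genpd_dev, level);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* *level now holds the matched OPP's level */
	dev_pm_opp_put(opp);

	return 0;
}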

/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev:	device for which we do this operation
 * @bw:		start bandwidth
 * @index:	which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
					   unsigned int *bw, int index)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !bw) {
		dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	if (index >= opp_table->path_count) {
		/* Drop the reference taken by _find_opp_table() */
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->bandwidth) {
			if (temp_opp->bandwidth[index].peak >= *bw) {
				opp = temp_opp;
				*bw = opp->bandwidth[index].peak;

				/* Increment the reference count of OPP */
				dev_pm_opp_get(opp);
				break;
			}
		}
	}

	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev:	device for which we do this operation
 * @bw:		start bandwidth
 * @index:	which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	if (!dev || !bw) {
		dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw);
		return ERR_PTR(-EINVAL);
	}

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return ERR_CAST(opp_table);

	if (index >= opp_table->path_count) {
		/* Drop the reference taken by _find_opp_table() */
		dev_pm_opp_put_opp_table(opp_table);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available && temp_opp->bandwidth) {
			/* go to the next node, before choosing prev */
			if (temp_opp->bandwidth[index].peak > *bw)
				break;
			opp = temp_opp;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp))
		dev_pm_opp_get(opp);
	mutex_unlock(&opp_table->lock);
	dev_pm_opp_put_opp_table(opp_table);

	if (!IS_ERR(opp))
		*bw = opp->bandwidth[index].peak;

	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
					    unsigned long freq)
{
	int ret;

	/* We may reach here for devices which don't change frequency */
	if (IS_ERR(clk))
		return 0;

	ret = clk_set_rate(clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	}

	return ret;
}

static int _generic_set_opp_regulator(struct opp_table *opp_table,
				      struct device *dev,
				      struct dev_pm_opp *opp,
				      unsigned long freq,
				      int scaling_down)
{
	struct regulator *reg = opp_table->regulators[0];
	struct dev_pm_opp *old_opp = opp_table->current_opp;
	int ret;

	/* This function only supports a single regulator per device */
	if (WARN_ON(opp_table->regulator_count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	/* Scaling up? Scale voltage before frequency */
	if (!scaling_down) {
		ret = _set_opp_voltage(dev, reg, opp->supplies);
		if (ret)
			goto restore_voltage;
	}

	/* Change frequency */
	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
	if (ret)
		goto restore_voltage;

	/* Scaling down? Scale voltage after frequency */
	if (scaling_down) {
		ret = _set_opp_voltage(dev, reg, opp->supplies);
		if (ret)
			goto restore_freq;
	}

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d", ret);
	}

	return 0;

restore_freq:
	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_opp->rate))
		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
			__func__, old_opp->rate);
restore_voltage:
	/* This shouldn't harm even if the voltages weren't updated earlier */
	_set_opp_voltage(dev, reg, old_opp->supplies);

	return ret;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (!opp) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				opp ? "set" : "remove", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_custom(const struct opp_table *opp_table,
			   struct device *dev, struct dev_pm_opp *opp,
			   unsigned long freq)
{
	struct dev_pm_set_opp_data *data = opp_table->set_opp_data;
	struct dev_pm_opp *old_opp = opp_table->current_opp;
	int size;

	/*
	 * We support this only if dev_pm_opp_set_regulators() was called
	 * earlier.
	 */
	if (opp_table->sod_supplies) {
		size = sizeof(*old_opp->supplies) * opp_table->regulator_count;
		memcpy(data->old_opp.supplies, old_opp->supplies, size);
		memcpy(data->new_opp.supplies, opp->supplies, size);
		data->regulator_count = opp_table->regulator_count;
	} else {
		data->regulator_count = 0;
	}

	data->regulators = opp_table->regulators;
	data->clk = opp_table->clk;
	data->dev = dev;
	data->old_opp.rate = old_opp->rate;
	data->new_opp.rate = freq;

	return opp_table->set_opp(data);
}

static int _set_required_opp(struct device *dev, struct device *pd_dev,
			     struct dev_pm_opp *opp, int i)
{
	unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
	int ret;

	if (!pd_dev)
		return 0;

	ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
	if (ret) {
		dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
			dev_name(pd_dev), pstate, ret);
	}

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev,
			      struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
	int i, ret = 0;

	if (!required_opp_tables)
		return 0;

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/*
	 * We only support genpd's OPPs in the "required-opps" for now, as we
	 * don't know much about other use cases. Error out if the required OPP
	 * doesn't belong to a genpd.
	 */
	if (unlikely(!required_opp_tables[0]->is_genpd)) {
		dev_err(dev, "required-opps don't belong to a genpd\n");
		return -ENOENT;
	}

	/* Single genpd case */
	if (!genpd_virt_devs)
		return _set_required_opp(dev, dev, opp, 0);

	/* Multiple genpd case */

	/*
	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
	 * after it is freed from another thread.
	 */
	mutex_lock(&opp_table->genpd_virt_dev_lock);

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		for (i = 0; i < opp_table->required_opp_count; i++) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	} else {
		for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
			ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
			if (ret)
				break;
		}
	}

	mutex_unlock(&opp_table->genpd_virt_dev_lock);

	return ret;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long freq;

	if (!IS_ERR(opp_table->clk)) {
		freq = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &freq);
	}

	/*
	 * Unable to find the current OPP? Pick the first one from the list,
	 * since it is in ascending order; otherwise the rest of the code would
	 * need special checks to validate current_opp.
	 */
	if (IS_ERR(opp)) {
		mutex_lock(&opp_table->lock);
		opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
		dev_pm_opp_get(opp);
		mutex_unlock(&opp_table->lock);
	}

	opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have an OPP table for the device, while others don't, and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_required_opps(dev, opp_table, NULL, false);

	opp_table->enabled = false;
	return ret;
}

static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, unsigned long freq)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (old_opp == opp && opp_table->current_rate == freq &&
	    opp_table->enabled) {
		dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, opp_table->current_rate, freq, old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	scaling_down = _opp_compare_key(old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}
	}

	if (opp_table->set_opp) {
		ret = _set_opp_custom(opp_table, dev, opp, freq);
	} else if (opp_table->regulators) {
		ret = _generic_set_opp_regulator(opp_table, dev, opp, freq,
						 scaling_down);
	} else {
		/* Only frequency scaling */
		ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
	}

	if (ret)
		return ret;

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	dev_pm_opp_get(opp);
	opp_table->current_opp = opp;
	opp_table->current_rate = freq;

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev:	 device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at the
 * fmax provided by the opp should have already rounded target_freq to the
 * target OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table;
	unsigned long freq = 0, temp_freq;
	struct dev_pm_opp *opp = NULL;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others,
		 * we look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate().
		 */
		if (!_get_opp_count(opp_table)) {
			ret = _generic_set_opp_clk_only(dev, opp_table->clk, target_freq);
			goto put_opp_table;
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table, so don't update the frequency
		 * we pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
				__func__, freq, ret);
			goto put_opp_table;
		}
	}

	ret = _set_opp(dev, opp_table, opp, freq);

	if (target_freq)
		dev_pm_opp_put(opp);
put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
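
/*
 * Illustrative sketch (not part of the original file): a minimal scaling
 * callback built on dev_pm_opp_set_rate(), the way cpufreq/devfreq-style
 * drivers typically use it. The function name is hypothetical.
 */
static int my_driver_set_freq(struct device *dev, unsigned long target_freq)
{
	/*
	 * OPP selection (clk_round_rate() plus a ceil lookup), voltage
	 * ordering and required-opps handling all happen inside _set_opp()
	 * via this one call. Passing 0 tears the state down through
	 * _disable_opp_table() above.
	 */
	return dev_pm_opp_set_rate(dev, target_freq);
}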

/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	ret = _set_opp(dev, opp_table, opp, opp ? opp->rate : 0);
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	mutex_lock(&opp_table->lock);
	list_add(&opp_dev->node, &opp_table->dev_list);
	mutex_unlock(&opp_table->lock);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	mutex_init(&opp_table->genpd_virt_dev_lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	_remove_opp_dev(opp_dev, opp_table);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

void _get_opp_table_kref(struct opp_table *opp_table)
{
	kref_get(&opp_table->kref);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already tried it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || opp_table->clk)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	if (!ret)
		return opp_table;

	if (ret == -ENOENT) {
		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;
	}

	dev_pm_opp_put_opp_table(opp_table);
	dev_err_probe(dev, ret, "Couldn't find clock\n");

	return ERR_PTR(ret);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to it. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
 *
 * For that reason we go for a slightly tricky implementation here, which
 * uses the opp_tables_busy flag to indicate if another creator is in the middle
 * of adding an OPP table and others should wait for it to finish.
 */
struct opp_table *_add_opp_table_indexed(struct device *dev, int index,
					 bool getclk)
{
	struct opp_table *opp_table;

again:
	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table_unlocked(dev);
	if (!IS_ERR(opp_table))
		goto unlock;

	/*
	 * The opp_tables list or an OPP table's dev_list is getting updated by
	 * another user, wait for it to finish.
	 */
	if (unlikely(opp_tables_busy)) {
		mutex_unlock(&opp_table_lock);
		cpu_relax();
		goto again;
	}

	opp_tables_busy = true;
	opp_table = _managed_opp(dev, index);

	/* Drop the lock to reduce the size of critical section */
	mutex_unlock(&opp_table_lock);

	if (opp_table) {
		if (!_add_opp_dev(dev, opp_table)) {
			dev_pm_opp_put_opp_table(opp_table);
			opp_table = ERR_PTR(-ENOMEM);
		}

		mutex_lock(&opp_table_lock);
	} else {
		opp_table = _allocate_opp_table(dev, index);

		mutex_lock(&opp_table_lock);
		if (!IS_ERR(opp_table))
			list_add(&opp_table->node, &opp_tables);
	}

	opp_tables_busy = false;

unlock:
	mutex_unlock(&opp_table_lock);

	return _update_opp_table_clk(dev, opp_table, getclk);
}

static struct opp_table *_add_opp_table(struct device *dev, bool getclk)
{
	return _add_opp_table_indexed(dev, 0, getclk);
}

struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
	return _find_opp_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
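
/*
 * Illustrative sketch (not part of the original file): the reference-counting
 * pattern around an OPP table. Every successful get must be paired with a
 * put, or the table can never be freed. The helper name is hypothetical.
 */
static void my_driver_use_table(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	/* ... the reference keeps the table alive while it is used ... */

	dev_pm_opp_put_opp_table(opp_table);
}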

static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
	struct opp_device *opp_dev, *temp;
	int i;

	/* Drop the lock as soon as we can */
	list_del(&opp_table->node);
	mutex_unlock(&opp_table_lock);

	if (opp_table->current_opp)
		dev_pm_opp_put(opp_table->current_opp);

	_of_clear_opp_table(opp_table);

	/* Release clk */
	if (!IS_ERR(opp_table->clk))
		clk_put(opp_table->clk);

	if (opp_table->paths) {
		for (i = 0; i < opp_table->path_count; i++)
			icc_put(opp_table->paths[i]);
		kfree(opp_table->paths);
	}

	WARN_ON(!list_empty(&opp_table->opp_list));

	list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
		/*
		 * The OPP table is getting removed, drop the performance state
		 * constraints.
		 */
		if (opp_table->genpd_performance_state)
			dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);

		_remove_opp_dev(opp_dev, opp_table);
	}

	mutex_destroy(&opp_table->genpd_virt_dev_lock);
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table);

void _opp_free(struct dev_pm_opp *opp)
{
	kfree(opp);
}

static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	list_del(&opp->node);
	mutex_unlock(&opp_table->lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	_of_opp_free_required_opps(opp_table, opp);
	opp_debug_remove_one(opp);
	kfree(opp);
}

void dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
}

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

/**
 * dev_pm_opp_remove()  - Remove an OPP from OPP table
 * @dev:	device for which we do this operation
 * @freq:	OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp = NULL, *iter;
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(iter, &opp_table->opp_list, node) {
		if (iter->rate == freq) {
			opp = iter;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);

	if (opp) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);

static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
					bool dynamic)
{
	struct dev_pm_opp *opp = NULL, *temp;

	mutex_lock(&opp_table->lock);
	list_for_each_entry(temp, &opp_table->opp_list, node) {
		/*
		 * The OPP core must drop the refcount only once for each OPP;
		 * the "removed" flag ensures that.
		 */
		if (!temp->removed && dynamic == temp->dynamic) {
			opp = temp;
			break;
		}
	}

	mutex_unlock(&opp_table->lock);
	return opp;
}

/*
 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
 * happen locklessly to avoid circular dependency issues. This routine must be
 * called without the opp_table->lock held.
 */
static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
{
	struct dev_pm_opp *opp;

	while ((opp = _opp_get_next(opp_table, dynamic))) {
		opp->removed = true;
		dev_pm_opp_put(opp);

		/* Drop the references taken by dev_pm_opp_add() */
		if (dynamic)
			dev_pm_opp_put_opp_table(opp_table);
	}
}

bool _opp_remove_all_static(struct opp_table *opp_table)
{
	mutex_lock(&opp_table->lock);

	if (!opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return false;
	}

	if (--opp_table->parsed_static_opps) {
		mutex_unlock(&opp_table->lock);
		return true;
	}

	mutex_unlock(&opp_table->lock);

	_opp_remove_all(opp_table, false);
	return true;
}

/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev:	device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	_opp_remove_all(opp_table, true);

	/* Drop the reference taken by _find_opp_table() */
	dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
	struct dev_pm_opp *opp;
	int supply_count, supply_size, icc_size;

	/* Allocate space for at least one supply */
	supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * supply_count;
	icc_size = sizeof(*opp->bandwidth) * table->path_count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);

	if (!opp)
		return NULL;

	/* Put the supplies at the end of the OPP structure as an empty array */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
	if (icc_size)
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
	INIT_LIST_HEAD(&opp->node);

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	if (!opp_table->regulators)
		return true;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	if (opp1->rate != opp2->rate)
		return opp1->rate < opp2->rate ? -1 : 1;
	if (opp1->bandwidth && opp2->bandwidth &&
	    opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
		return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;
	return 0;
}
   1784
   1785static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
   1786			     struct opp_table *opp_table,
   1787			     struct list_head **head)
   1788{
   1789	struct dev_pm_opp *opp;
   1790	int opp_cmp;
   1791
   1792	/*
   1793	 * Insert new OPP in order of increasing frequency and discard if
   1794	 * already present.
   1795	 *
   1796	 * Need to use &opp_table->opp_list in the condition part of the 'for'
   1797	 * loop; don't replace it with 'head', otherwise the loop will never
   1798	 * terminate.
   1799	 */
   1800	list_for_each_entry(opp, &opp_table->opp_list, node) {
   1801		opp_cmp = _opp_compare_key(new_opp, opp);
   1802		if (opp_cmp > 0) {
   1803			*head = &opp->node;
   1804			continue;
   1805		}
   1806
   1807		if (opp_cmp < 0)
   1808			return 0;
   1809
   1810		/* Duplicate OPPs */
   1811		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
   1812			 __func__, opp->rate, opp->supplies[0].u_volt,
   1813			 opp->available, new_opp->rate,
   1814			 new_opp->supplies[0].u_volt, new_opp->available);
   1815
   1816		/* Should we compare voltages for all regulators here? */
   1817		return opp->available &&
   1818		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
   1819	}
   1820
   1821	return 0;
   1822}
   1823
   1824void _required_opps_available(struct dev_pm_opp *opp, int count)
   1825{
   1826	int i;
   1827
   1828	for (i = 0; i < count; i++) {
   1829		if (opp->required_opps[i]->available)
   1830			continue;
   1831
   1832		opp->available = false;
   1833		pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
   1834			 __func__, opp->required_opps[i]->np, opp->rate);
   1835		return;
   1836	}
   1837}
   1838
   1839/*
   1840 * Returns:
   1841 * 0: On success; an appropriate warning is printed for duplicate OPPs.
   1842 * -EBUSY: For an OPP with the same freq/volt that is available. The callers
   1843 *  of _opp_add() must return 0 if they receive -EBUSY from it, to make sure
   1844 *  we don't print error messages unnecessarily when different parts of the
   1845 *  kernel try to initialize the OPP table.
   1846 * -EEXIST: For an OPP with the same freq but a different volt, or one that is
   1847 *  unavailable. The callers of _opp_add() should treat this as an error.
   1848 */
   1849int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
   1850	     struct opp_table *opp_table, bool rate_not_available)
   1851{
   1852	struct list_head *head;
   1853	int ret;
   1854
   1855	mutex_lock(&opp_table->lock);
   1856	head = &opp_table->opp_list;
   1857
   1858	ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
   1859	if (ret) {
   1860		mutex_unlock(&opp_table->lock);
   1861		return ret;
   1862	}
   1863
   1864	list_add(&new_opp->node, head);
   1865	mutex_unlock(&opp_table->lock);
   1866
   1867	new_opp->opp_table = opp_table;
   1868	kref_init(&new_opp->kref);
   1869
   1870	opp_debug_create_one(new_opp, opp_table);
   1871
   1872	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
   1873		new_opp->available = false;
   1874		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
   1875			 __func__, new_opp->rate);
   1876	}
   1877
   1878	/* required-opps not fully initialized yet */
   1879	if (lazy_linking_pending(opp_table))
   1880		return 0;
   1881
   1882	_required_opps_available(new_opp, opp_table->required_opp_count);
   1883
   1884	return 0;
   1885}
   1886
   1887/**
   1888 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
   1889 * @opp_table:	OPP table
   1890 * @dev:	device for which we do this operation
   1891 * @freq:	Frequency in Hz for this OPP
   1892 * @u_volt:	Voltage in uVolts for this OPP
   1893 * @dynamic:	Dynamically added OPPs.
   1894 *
   1895 * This function adds an opp definition to the opp table and returns status.
   1896 * The opp is made available by default and it can be controlled using
   1897 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
   1898 *
   1899 * NOTE: The "dynamic" parameter marks OPPs added at runtime. OPPs added by
   1900 * dev_pm_opp_of_add_table(), and freed by dev_pm_opp_of_remove_table(), are static.
   1901 *
   1902 * Return:
   1903 * 0		On success, OR
   1904 *		for duplicate OPPs (same freq and volt) when opp->available
   1905 * -EEXIST	When freqs are the same but volts differ, OR
   1906 *		for duplicate OPPs (same freq and volt) when !opp->available
   1907 * -ENOMEM	Memory allocation failure
   1908 */
   1909int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
   1910		unsigned long freq, long u_volt, bool dynamic)
   1911{
   1912	struct dev_pm_opp *new_opp;
   1913	unsigned long tol;
   1914	int ret;
   1915
   1916	new_opp = _opp_allocate(opp_table);
   1917	if (!new_opp)
   1918		return -ENOMEM;
   1919
   1920	/* populate the opp table */
   1921	new_opp->rate = freq;
   1922	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
   1923	new_opp->supplies[0].u_volt = u_volt;
   1924	new_opp->supplies[0].u_volt_min = u_volt - tol;
   1925	new_opp->supplies[0].u_volt_max = u_volt + tol;
   1926	new_opp->available = true;
   1927	new_opp->dynamic = dynamic;
   1928
   1929	ret = _opp_add(dev, new_opp, opp_table, false);
   1930	if (ret) {
   1931		/* Don't return error for duplicate OPPs */
   1932		if (ret == -EBUSY)
   1933			ret = 0;
   1934		goto free_opp;
   1935	}
   1936
   1937	/*
   1938	 * Notify the changes in the availability of the operable
   1939	 * frequency/voltage list.
   1940	 */
   1941	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
   1942	return 0;
   1943
   1944free_opp:
   1945	_opp_free(new_opp);
   1946
   1947	return ret;
   1948}
   1949
   1950/**
   1951 * dev_pm_opp_set_supported_hw() - Set supported platforms
   1952 * @dev: Device for which supported-hw has to be set.
   1953 * @versions: Array of hierarchy of versions to match.
   1954 * @count: Number of elements in the array.
   1955 *
   1956 * This is required only for the V2 bindings, and it enables a platform to
   1957 * specify the hierarchy of versions it supports. The OPP layer will then
   1958 * enable only those OPPs that are available for these versions, based on
   1959 * each OPP's 'opp-supported-hw' property.
   1960 */
   1961struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
   1962			const u32 *versions, unsigned int count)
   1963{
   1964	struct opp_table *opp_table;
   1965
   1966	opp_table = _add_opp_table(dev, false);
   1967	if (IS_ERR(opp_table))
   1968		return opp_table;
   1969
   1970	/* Make sure there are no concurrent readers while updating opp_table */
   1971	WARN_ON(!list_empty(&opp_table->opp_list));
   1972
   1973	/* Another CPU that shares the OPP table has set the property? */
   1974	if (opp_table->supported_hw)
   1975		return opp_table;
   1976
   1977	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
   1978					GFP_KERNEL);
   1979	if (!opp_table->supported_hw) {
   1980		dev_pm_opp_put_opp_table(opp_table);
   1981		return ERR_PTR(-ENOMEM);
   1982	}
   1983
   1984	opp_table->supported_hw_count = count;
   1985
   1986	return opp_table;
   1987}
   1988EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
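
/*
 * Example (illustrative sketch, not part of this file): a platform driver
 * could advertise the hardware versions it runs on before its OPP table is
 * parsed; "my_dev" and the version values are hypothetical:
 *
 *	static const u32 hw_versions[] = { 0x2, 0x1 };
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_set_supported_hw(my_dev, hw_versions,
 *					    ARRAY_SIZE(hw_versions));
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 * Only OPP nodes whose 'opp-supported-hw' matches these values are then
 * enabled; dev_pm_opp_put_supported_hw(table) undoes the call.
 */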
   1989
   1990/**
   1991 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
   1992 * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw().
   1993 *
   1994 * This is required only for the V2 bindings, and is called for a matching
   1995 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
   1996 * will not be freed.
   1997 */
   1998void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
   1999{
   2000	if (unlikely(!opp_table))
   2001		return;
   2002
   2003	kfree(opp_table->supported_hw);
   2004	opp_table->supported_hw = NULL;
   2005	opp_table->supported_hw_count = 0;
   2006
   2007	dev_pm_opp_put_opp_table(opp_table);
   2008}
   2009EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
   2010
   2011static void devm_pm_opp_supported_hw_release(void *data)
   2012{
   2013	dev_pm_opp_put_supported_hw(data);
   2014}
   2015
   2016/**
   2017 * devm_pm_opp_set_supported_hw() - Set supported platforms
   2018 * @dev: Device for which supported-hw has to be set.
   2019 * @versions: Array of hierarchy of versions to match.
   2020 * @count: Number of elements in the array.
   2021 *
   2022 * This is a resource-managed variant of dev_pm_opp_set_supported_hw().
   2023 *
   2024 * Return: 0 on success and a negative errno otherwise.
   2025 */
   2026int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
   2027				 unsigned int count)
   2028{
   2029	struct opp_table *opp_table;
   2030
   2031	opp_table = dev_pm_opp_set_supported_hw(dev, versions, count);
   2032	if (IS_ERR(opp_table))
   2033		return PTR_ERR(opp_table);
   2034
   2035	return devm_add_action_or_reset(dev, devm_pm_opp_supported_hw_release,
   2036					opp_table);
   2037}
   2038EXPORT_SYMBOL_GPL(devm_pm_opp_set_supported_hw);
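
/*
 * Example (illustrative sketch, not part of this file): with the devm_
 * variant the explicit put in the error/remove paths goes away, as the
 * release action runs when the hypothetical "my_dev" is detached:
 *
 *	ret = devm_pm_opp_set_supported_hw(my_dev, hw_versions,
 *					   ARRAY_SIZE(hw_versions));
 *	if (ret)
 *		return ret;
 */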
   2039
   2040/**
   2041 * dev_pm_opp_set_prop_name() - Set prop-extn name
   2042 * @dev: Device for which the prop-name has to be set.
   2043 * @name: suffix to append to the property names.
   2044 *
   2045 * This is required only for the V2 bindings, and it enables a platform to
   2046 * specify the extension to be used for certain property names. The properties
   2047 * the extension applies to are opp-microvolt and opp-microamp. The OPP core
   2048 * will then look for these properties suffixed with "-<name>".
   2049 */
   2050struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
   2051{
   2052	struct opp_table *opp_table;
   2053
   2054	opp_table = _add_opp_table(dev, false);
   2055	if (IS_ERR(opp_table))
   2056		return opp_table;
   2057
   2058	/* Make sure there are no concurrent readers while updating opp_table */
   2059	WARN_ON(!list_empty(&opp_table->opp_list));
   2060
   2061	/* Another CPU that shares the OPP table has set the property? */
   2062	if (opp_table->prop_name)
   2063		return opp_table;
   2064
   2065	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
   2066	if (!opp_table->prop_name) {
   2067		dev_pm_opp_put_opp_table(opp_table);
   2068		return ERR_PTR(-ENOMEM);
   2069	}
   2070
   2071	return opp_table;
   2072}
   2073EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
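
/*
 * Example (illustrative sketch, not part of this file): with a hypothetical
 * speed-bin suffix "speed0", the core would look up "opp-microvolt-speed0"
 * and "opp-microamp-speed0" in each OPP node:
 *
 *	table = dev_pm_opp_set_prop_name(my_dev, "speed0");
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 */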
   2074
   2075/**
   2076 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
   2077 * @opp_table: OPP table returned by dev_pm_opp_set_prop_name().
   2078 *
   2079 * This is required only for the V2 bindings, and is called for a matching
   2080 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
   2081 * will not be freed.
   2082 */
   2083void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
   2084{
   2085	if (unlikely(!opp_table))
   2086		return;
   2087
   2088	kfree(opp_table->prop_name);
   2089	opp_table->prop_name = NULL;
   2090
   2091	dev_pm_opp_put_opp_table(opp_table);
   2092}
   2093EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
   2094
   2095/**
   2096 * dev_pm_opp_set_regulators() - Set regulator names for the device
   2097 * @dev: Device for which regulator name is being set.
   2098 * @names: Array of pointers to the names of the regulator.
   2099 * @count: Number of regulators.
   2100 *
   2101 * In order to support OPP switching, the OPP layer needs to know the names of
   2102 * the device's regulators, as the core will have to switch voltages as well
   2103 * as frequencies.
   2104 *
   2105 * This must be called before any OPPs are initialized for the device.
   2106 */
   2107struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
   2108					    const char * const names[],
   2109					    unsigned int count)
   2110{
   2111	struct dev_pm_opp_supply *supplies;
   2112	struct opp_table *opp_table;
   2113	struct regulator *reg;
   2114	int ret, i;
   2115
   2116	opp_table = _add_opp_table(dev, false);
   2117	if (IS_ERR(opp_table))
   2118		return opp_table;
   2119
   2120	/* This should be called before OPPs are initialized */
   2121	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
   2122		ret = -EBUSY;
   2123		goto err;
   2124	}
   2125
   2126	/* Another CPU that shares the OPP table has set the regulators? */
   2127	if (opp_table->regulators)
   2128		return opp_table;
   2129
   2130	opp_table->regulators = kmalloc_array(count,
   2131					      sizeof(*opp_table->regulators),
   2132					      GFP_KERNEL);
   2133	if (!opp_table->regulators) {
   2134		ret = -ENOMEM;
   2135		goto err;
   2136	}
   2137
   2138	for (i = 0; i < count; i++) {
   2139		reg = regulator_get_optional(dev, names[i]);
   2140		if (IS_ERR(reg)) {
   2141			ret = dev_err_probe(dev, PTR_ERR(reg),
   2142					    "%s: no regulator (%s) found\n",
   2143					    __func__, names[i]);
   2144			goto free_regulators;
   2145		}
   2146
   2147		opp_table->regulators[i] = reg;
   2148	}
   2149
   2150	opp_table->regulator_count = count;
   2151
   2152	supplies = kmalloc_array(count * 2, sizeof(*supplies), GFP_KERNEL);
   2153	if (!supplies) {
   2154		ret = -ENOMEM;
   2155		goto free_regulators;
   2156	}
   2157
   2158	mutex_lock(&opp_table->lock);
   2159	opp_table->sod_supplies = supplies;
   2160	if (opp_table->set_opp_data) {
   2161		opp_table->set_opp_data->old_opp.supplies = supplies;
   2162		opp_table->set_opp_data->new_opp.supplies = supplies + count;
   2163	}
   2164	mutex_unlock(&opp_table->lock);
   2165
   2166	return opp_table;
   2167
   2168free_regulators:
   2169	while (i != 0)
   2170		regulator_put(opp_table->regulators[--i]);
   2171
   2172	kfree(opp_table->regulators);
   2173	opp_table->regulators = NULL;
   2174	opp_table->regulator_count = -1;
   2175err:
   2176	dev_pm_opp_put_opp_table(opp_table);
   2177
   2178	return ERR_PTR(ret);
   2179}
   2180EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
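
/*
 * Example (illustrative sketch, not part of this file): a device supplied by
 * two regulators; "my_dev" and the supply names are hypothetical, and the
 * order of names is assumed to match the order of supplies in the OPP table:
 *
 *	static const char * const reg_names[] = { "vdd", "vddio" };
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_set_regulators(my_dev, reg_names,
 *					  ARRAY_SIZE(reg_names));
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 */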
   2181
   2182/**
   2183 * dev_pm_opp_put_regulators() - Releases resources blocked for regulator
   2184 * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
   2185 */
   2186void dev_pm_opp_put_regulators(struct opp_table *opp_table)
   2187{
   2188	int i;
   2189
   2190	if (unlikely(!opp_table))
   2191		return;
   2192
   2193	if (!opp_table->regulators)
   2194		goto put_opp_table;
   2195
   2196	if (opp_table->enabled) {
   2197		for (i = opp_table->regulator_count - 1; i >= 0; i--)
   2198			regulator_disable(opp_table->regulators[i]);
   2199	}
   2200
   2201	for (i = opp_table->regulator_count - 1; i >= 0; i--)
   2202		regulator_put(opp_table->regulators[i]);
   2203
   2204	mutex_lock(&opp_table->lock);
   2205	if (opp_table->set_opp_data) {
   2206		opp_table->set_opp_data->old_opp.supplies = NULL;
   2207		opp_table->set_opp_data->new_opp.supplies = NULL;
   2208	}
   2209
   2210	kfree(opp_table->sod_supplies);
   2211	opp_table->sod_supplies = NULL;
   2212	mutex_unlock(&opp_table->lock);
   2213
   2214	kfree(opp_table->regulators);
   2215	opp_table->regulators = NULL;
   2216	opp_table->regulator_count = -1;
   2217
   2218put_opp_table:
   2219	dev_pm_opp_put_opp_table(opp_table);
   2220}
   2221EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
   2222
   2223static void devm_pm_opp_regulators_release(void *data)
   2224{
   2225	dev_pm_opp_put_regulators(data);
   2226}
   2227
   2228/**
   2229 * devm_pm_opp_set_regulators() - Set regulator names for the device
   2230 * @dev: Device for which regulator name is being set.
   2231 * @names: Array of pointers to the names of the regulator.
   2232 * @count: Number of regulators.
   2233 *
   2234 * This is a resource-managed variant of dev_pm_opp_set_regulators().
   2235 *
   2236 * Return: 0 on success and a negative errno otherwise.
   2237 */
   2238int devm_pm_opp_set_regulators(struct device *dev,
   2239			       const char * const names[],
   2240			       unsigned int count)
   2241{
   2242	struct opp_table *opp_table;
   2243
   2244	opp_table = dev_pm_opp_set_regulators(dev, names, count);
   2245	if (IS_ERR(opp_table))
   2246		return PTR_ERR(opp_table);
   2247
   2248	return devm_add_action_or_reset(dev, devm_pm_opp_regulators_release,
   2249					opp_table);
   2250}
   2251EXPORT_SYMBOL_GPL(devm_pm_opp_set_regulators);
   2252
   2253/**
   2254 * dev_pm_opp_set_clkname() - Set clk name for the device
   2255 * @dev: Device for which clk name is being set.
   2256 * @name: Clk name.
   2257 *
   2258 * In order to support OPP switching, the OPP layer needs a pointer to the
   2259 * device's clock. Simple cases work fine without using this routine (i.e. by
   2260 * passing a NULL connection-id), but for a device with multiple clocks
   2261 * available, the OPP core needs to know the exact name of the clk to use.
   2262 *
   2263 * This must be called before any OPPs are initialized for the device.
   2264 */
   2265struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
   2266{
   2267	struct opp_table *opp_table;
   2268	int ret;
   2269
   2270	opp_table = _add_opp_table(dev, false);
   2271	if (IS_ERR(opp_table))
   2272		return opp_table;
   2273
   2274	/* This should be called before OPPs are initialized */
   2275	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
   2276		ret = -EBUSY;
   2277		goto err;
   2278	}
   2279
   2280	/* clk shouldn't be initialized at this point */
   2281	if (WARN_ON(opp_table->clk)) {
   2282		ret = -EBUSY;
   2283		goto err;
   2284	}
   2285
   2286	/* Find clk for the device */
   2287	opp_table->clk = clk_get(dev, name);
   2288	if (IS_ERR(opp_table->clk)) {
   2289		ret = dev_err_probe(dev, PTR_ERR(opp_table->clk),
   2290				    "%s: Couldn't find clock\n", __func__);
   2291		goto err;
   2292	}
   2293
   2294	return opp_table;
   2295
   2296err:
   2297	dev_pm_opp_put_opp_table(opp_table);
   2298
   2299	return ERR_PTR(ret);
   2300}
   2301EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
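
/*
 * Example (illustrative sketch, not part of this file): for a device with
 * multiple clocks, select the one the OPPs should scale; "my_dev" and the
 * "core" connection-id are hypothetical:
 *
 *	table = dev_pm_opp_set_clkname(my_dev, "core");
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 */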
   2302
   2303/**
   2304 * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
   2305 * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
   2306 */
   2307void dev_pm_opp_put_clkname(struct opp_table *opp_table)
   2308{
   2309	if (unlikely(!opp_table))
   2310		return;
   2311
   2312	clk_put(opp_table->clk);
   2313	opp_table->clk = ERR_PTR(-EINVAL);
   2314
   2315	dev_pm_opp_put_opp_table(opp_table);
   2316}
   2317EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
   2318
   2319static void devm_pm_opp_clkname_release(void *data)
   2320{
   2321	dev_pm_opp_put_clkname(data);
   2322}
   2323
   2324/**
   2325 * devm_pm_opp_set_clkname() - Set clk name for the device
   2326 * @dev: Device for which clk name is being set.
   2327 * @name: Clk name.
   2328 *
   2329 * This is a resource-managed variant of dev_pm_opp_set_clkname().
   2330 *
   2331 * Return: 0 on success and a negative errno otherwise.
   2332 */
   2333int devm_pm_opp_set_clkname(struct device *dev, const char *name)
   2334{
   2335	struct opp_table *opp_table;
   2336
   2337	opp_table = dev_pm_opp_set_clkname(dev, name);
   2338	if (IS_ERR(opp_table))
   2339		return PTR_ERR(opp_table);
   2340
   2341	return devm_add_action_or_reset(dev, devm_pm_opp_clkname_release,
   2342					opp_table);
   2343}
   2344EXPORT_SYMBOL_GPL(devm_pm_opp_set_clkname);
   2345
   2346/**
   2347 * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
   2348 * @dev: Device for which the helper is getting registered.
   2349 * @set_opp: Custom set OPP helper.
   2350 *
   2351 * This is useful for supporting complex platforms (like platforms with
   2352 * multiple regulators per device) that can't use the generic OPP set-rate helper.
   2353 *
   2354 * This must be called before any OPPs are initialized for the device.
   2355 */
   2356struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
   2357			int (*set_opp)(struct dev_pm_set_opp_data *data))
   2358{
   2359	struct dev_pm_set_opp_data *data;
   2360	struct opp_table *opp_table;
   2361
   2362	if (!set_opp)
   2363		return ERR_PTR(-EINVAL);
   2364
   2365	opp_table = _add_opp_table(dev, false);
   2366	if (IS_ERR(opp_table))
   2367		return opp_table;
   2368
   2369	/* This should be called before OPPs are initialized */
   2370	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
   2371		dev_pm_opp_put_opp_table(opp_table);
   2372		return ERR_PTR(-EBUSY);
   2373	}
   2374
   2375	/* Another CPU that shares the OPP table has set the helper? */
   2376	if (opp_table->set_opp)
   2377		return opp_table;
   2378
   2379	data = kzalloc(sizeof(*data), GFP_KERNEL);
   2380	if (!data)
   2381		return ERR_PTR(-ENOMEM);
   2382
   2383	mutex_lock(&opp_table->lock);
   2384	opp_table->set_opp_data = data;
   2385	if (opp_table->sod_supplies) {
   2386		data->old_opp.supplies = opp_table->sod_supplies;
   2387		data->new_opp.supplies = opp_table->sod_supplies +
   2388					 opp_table->regulator_count;
   2389	}
   2390	mutex_unlock(&opp_table->lock);
   2391
   2392	opp_table->set_opp = set_opp;
   2393
   2394	return opp_table;
   2395}
   2396EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
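
/*
 * Example (illustrative sketch, not part of this file): a minimal custom
 * set_opp helper; "my_program_hw" is a hypothetical platform routine that
 * programs the clk and the supplies in data->new_opp.supplies in whatever
 * order the hardware requires:
 *
 *	static int my_set_opp(struct dev_pm_set_opp_data *data)
 *	{
 *		return my_program_hw(data);
 *	}
 *
 *	table = dev_pm_opp_register_set_opp_helper(my_dev, my_set_opp);
 */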
   2397
   2398/**
   2399 * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
   2400 *					   set_opp helper
   2401 * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
   2402 *
   2403 * Release resources blocked for the platform-specific set_opp helper.
   2404 */
   2405void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
   2406{
   2407	if (unlikely(!opp_table))
   2408		return;
   2409
   2410	opp_table->set_opp = NULL;
   2411
   2412	mutex_lock(&opp_table->lock);
   2413	kfree(opp_table->set_opp_data);
   2414	opp_table->set_opp_data = NULL;
   2415	mutex_unlock(&opp_table->lock);
   2416
   2417	dev_pm_opp_put_opp_table(opp_table);
   2418}
   2419EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
   2420
   2421static void devm_pm_opp_unregister_set_opp_helper(void *data)
   2422{
   2423	dev_pm_opp_unregister_set_opp_helper(data);
   2424}
   2425
   2426/**
   2427 * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper
   2428 * @dev: Device for which the helper is getting registered.
   2429 * @set_opp: Custom set OPP helper.
   2430 *
   2431 * This is a resource-managed version of dev_pm_opp_register_set_opp_helper().
   2432 *
   2433 * Return: 0 on success and a negative errno otherwise.
   2434 */
   2435int devm_pm_opp_register_set_opp_helper(struct device *dev,
   2436					int (*set_opp)(struct dev_pm_set_opp_data *data))
   2437{
   2438	struct opp_table *opp_table;
   2439
   2440	opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp);
   2441	if (IS_ERR(opp_table))
   2442		return PTR_ERR(opp_table);
   2443
   2444	return devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper,
   2445					opp_table);
   2446}
   2447EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper);
   2448
   2449static void _opp_detach_genpd(struct opp_table *opp_table)
   2450{
   2451	int index;
   2452
   2453	if (!opp_table->genpd_virt_devs)
   2454		return;
   2455
   2456	for (index = 0; index < opp_table->required_opp_count; index++) {
   2457		if (!opp_table->genpd_virt_devs[index])
   2458			continue;
   2459
   2460		dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
   2461		opp_table->genpd_virt_devs[index] = NULL;
   2462	}
   2463
   2464	kfree(opp_table->genpd_virt_devs);
   2465	opp_table->genpd_virt_devs = NULL;
   2466}
   2467
   2468/**
   2469 * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer
   2470 * @dev: Consumer device for which the genpd is getting attached.
   2471 * @names: Null terminated array of pointers containing names of genpd to attach.
   2472 * @virt_devs: Pointer to return the array of virtual devices.
   2473 *
   2474 * Multiple generic power domains for a device are supported with the help of
   2475 * virtual genpd devices, which are created for each consumer-device/genpd
   2476 * pair. These are the device structures which are attached to the power domain
   2477 * and are required by the OPP core to set the performance state of the genpd.
   2478 * The same API also works for the case where a single genpd is available, so
   2479 * that case doesn't need to be supported separately.
   2480 *
   2481 * This helper will normally be called by the consumer driver of the device
   2482 * "dev", as only that has details of the genpd names.
   2483 *
   2484 * This helper needs to be called once with a list of all genpds to attach;
   2485 * otherwise the OPP core will use the original device structure instead.
   2486 *
   2487 * The order of entries in the names array must match the order in which
   2488 * "required-opps" are added in DT.
   2489 */
   2490struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
   2491		const char * const *names, struct device ***virt_devs)
   2492{
   2493	struct opp_table *opp_table;
   2494	struct device *virt_dev;
   2495	int index = 0, ret = -EINVAL;
   2496	const char * const *name = names;
   2497
   2498	opp_table = _add_opp_table(dev, false);
   2499	if (IS_ERR(opp_table))
   2500		return opp_table;
   2501
   2502	if (opp_table->genpd_virt_devs)
   2503		return opp_table;
   2504
   2505	/*
   2506	 * If the genpd's OPP table isn't already initialized, parsing of the
   2507	 * required-opps fails for dev. We should retry this after the genpd's
   2508	 * OPP table has been added.
   2509	 */
   2510	if (!opp_table->required_opp_count) {
   2511		ret = -EPROBE_DEFER;
   2512		goto put_table;
   2513	}
   2514
   2515	mutex_lock(&opp_table->genpd_virt_dev_lock);
   2516
   2517	opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
   2518					     sizeof(*opp_table->genpd_virt_devs),
   2519					     GFP_KERNEL);
   2520	if (!opp_table->genpd_virt_devs)
   2521		goto unlock;
   2522
   2523	while (*name) {
   2524		if (index >= opp_table->required_opp_count) {
   2525			dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n",
   2526				*name, opp_table->required_opp_count, index);
   2527			goto err;
   2528		}
   2529
   2530		virt_dev = dev_pm_domain_attach_by_name(dev, *name);
   2531		if (IS_ERR(virt_dev)) {
   2532			ret = PTR_ERR(virt_dev);
   2533			dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
   2534			goto err;
   2535		}
   2536
   2537		opp_table->genpd_virt_devs[index] = virt_dev;
   2538		index++;
   2539		name++;
   2540	}
   2541
   2542	if (virt_devs)
   2543		*virt_devs = opp_table->genpd_virt_devs;
   2544	mutex_unlock(&opp_table->genpd_virt_dev_lock);
   2545
   2546	return opp_table;
   2547
   2548err:
   2549	_opp_detach_genpd(opp_table);
   2550unlock:
   2551	mutex_unlock(&opp_table->genpd_virt_dev_lock);
   2552
   2553put_table:
   2554	dev_pm_opp_put_opp_table(opp_table);
   2555
   2556	return ERR_PTR(ret);
   2557}
   2558EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd);
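
/*
 * Example (illustrative sketch, not part of this file): attaching two power
 * domains; the terminating NULL is required, and the hypothetical names
 * ("mx", "cx") must follow the order of "required-opps" in DT:
 *
 *	static const char * const pd_names[] = { "mx", "cx", NULL };
 *	struct device **virt_devs;
 *	struct opp_table *table;
 *
 *	table = dev_pm_opp_attach_genpd(my_dev, pd_names, &virt_devs);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 */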
   2559
   2560/**
   2561 * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device.
   2562 * @opp_table: OPP table returned by dev_pm_opp_attach_genpd().
   2563 *
   2564 * This detaches the genpd(s), resets the virtual device pointers, and puts the
   2565 * OPP table.
   2566 */
   2567void dev_pm_opp_detach_genpd(struct opp_table *opp_table)
   2568{
   2569	if (unlikely(!opp_table))
   2570		return;
   2571
   2572	/*
   2573	 * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
   2574	 * used in parallel.
   2575	 */
   2576	mutex_lock(&opp_table->genpd_virt_dev_lock);
   2577	_opp_detach_genpd(opp_table);
   2578	mutex_unlock(&opp_table->genpd_virt_dev_lock);
   2579
   2580	dev_pm_opp_put_opp_table(opp_table);
   2581}
   2582EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd);
   2583
   2584static void devm_pm_opp_detach_genpd(void *data)
   2585{
   2586	dev_pm_opp_detach_genpd(data);
   2587}
   2588
   2589/**
   2590 * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual
   2591 *			      device pointer
   2592 * @dev: Consumer device for which the genpd is getting attached.
   2593 * @names: Null terminated array of pointers containing names of genpd to attach.
   2594 * @virt_devs: Pointer to return the array of virtual devices.
   2595 *
   2596 * This is a resource-managed version of dev_pm_opp_attach_genpd().
   2597 *
   2598 * Return: 0 on success and a negative errno otherwise.
   2599 */
   2600int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names,
   2601			     struct device ***virt_devs)
   2602{
   2603	struct opp_table *opp_table;
   2604
   2605	opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs);
   2606	if (IS_ERR(opp_table))
   2607		return PTR_ERR(opp_table);
   2608
   2609	return devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd,
   2610					opp_table);
   2611}
   2612EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd);
   2613
   2614/**
   2615 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
   2616 * @src_table: OPP table which has @dst_table as one of its required OPP table.
   2617 * @dst_table: Required OPP table of the @src_table.
   2618 * @src_opp: OPP from the @src_table.
   2619 *
   2620 * This function returns the OPP (present in @dst_table) pointed out by the
   2621 * "required-opps" property of the @src_opp (present in @src_table).
   2622 *
   2623 * The callers are required to call dev_pm_opp_put() for the returned OPP after
   2624 * use.
   2625 *
   2626 * Return: pointer to 'struct dev_pm_opp' on success, or an ERR_PTR() encoded error otherwise.
   2627 */
   2628struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
   2629						 struct opp_table *dst_table,
   2630						 struct dev_pm_opp *src_opp)
   2631{
   2632	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
   2633	int i;
   2634
   2635	if (!src_table || !dst_table || !src_opp ||
   2636	    !src_table->required_opp_tables)
   2637		return ERR_PTR(-EINVAL);
   2638
   2639	/* required-opps not fully initialized yet */
   2640	if (lazy_linking_pending(src_table))
   2641		return ERR_PTR(-EBUSY);
   2642
   2643	for (i = 0; i < src_table->required_opp_count; i++) {
   2644		if (src_table->required_opp_tables[i] == dst_table) {
   2645			mutex_lock(&src_table->lock);
   2646
   2647			list_for_each_entry(opp, &src_table->opp_list, node) {
   2648				if (opp == src_opp) {
   2649					dest_opp = opp->required_opps[i];
   2650					dev_pm_opp_get(dest_opp);
   2651					break;
   2652				}
   2653			}
   2654
   2655			mutex_unlock(&src_table->lock);
   2656			break;
   2657		}
   2658	}
   2659
   2660	if (IS_ERR(dest_opp)) {
   2661		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
   2662		       src_table, dst_table);
   2663	}
   2664
   2665	return dest_opp;
   2666}
   2667EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);
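
/*
 * Example (illustrative sketch, not part of this file): translating a device
 * OPP into the matching genpd OPP; all variables are hypothetical:
 *
 *	genpd_opp = dev_pm_opp_xlate_required_opp(dev_table, genpd_table, opp);
 *	if (IS_ERR(genpd_opp))
 *		return PTR_ERR(genpd_opp);
 *	...
 *	dev_pm_opp_put(genpd_opp);
 */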
   2668
   2669/**
   2670 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
   2671 * @src_table: OPP table which has dst_table as one of its required OPP table.
   2672 * @dst_table: Required OPP table of the src_table.
   2673 * @pstate: Current performance state of the src_table.
   2674 *
   2675 * This returns the pstate of the OPP (present in @dst_table) pointed out by
   2676 * the "required-opps" property of the OPP (present in @src_table) whose
   2677 * performance state is set to @pstate.
   2678 *
   2679 * Return: Zero or positive performance state on success, otherwise negative
   2680 * value on errors.
   2681 */
   2682int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
   2683				       struct opp_table *dst_table,
   2684				       unsigned int pstate)
   2685{
   2686	struct dev_pm_opp *opp;
   2687	int dest_pstate = -EINVAL;
   2688	int i;
   2689
   2690	/*
   2691	 * Normally the src_table will have the "required-opps" property set to
   2692	 * point to one of the OPPs in the dst_table, but in some cases the
   2693	 * genpd and its master have a one-to-one mapping of performance states,
   2694	 * and so neither of them has the "required-opps" property set. Return
   2695	 * the pstate of the src_table unchanged in such cases.
   2696	 */
   2697	if (!src_table || !src_table->required_opp_count)
   2698		return pstate;
   2699
   2700	/* required-opps not fully initialized yet */
   2701	if (lazy_linking_pending(src_table))
   2702		return -EBUSY;
   2703
   2704	for (i = 0; i < src_table->required_opp_count; i++) {
   2705		if (src_table->required_opp_tables[i]->np == dst_table->np)
   2706			break;
   2707	}
   2708
   2709	if (unlikely(i == src_table->required_opp_count)) {
   2710		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
   2711		       __func__, src_table, dst_table);
   2712		return -EINVAL;
   2713	}
   2714
   2715	mutex_lock(&src_table->lock);
   2716
   2717	list_for_each_entry(opp, &src_table->opp_list, node) {
   2718		if (opp->pstate == pstate) {
   2719			dest_pstate = opp->required_opps[i]->pstate;
   2720			goto unlock;
   2721		}
   2722	}
   2723
   2724	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
   2725	       dst_table);
   2726
   2727unlock:
   2728	mutex_unlock(&src_table->lock);
   2729
   2730	return dest_pstate;
   2731}
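
/*
 * Example (illustrative sketch, not part of this file): mapping a consumer's
 * performance state onto its parent genpd's scale; the tables and the state
 * value are hypothetical:
 *
 *	pstate = dev_pm_opp_xlate_performance_state(child_table, parent_table, 3);
 *	if (pstate < 0)
 *		return pstate;
 */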
   2732
   2733/**
   2734 * dev_pm_opp_add() - Add an OPP to the device's OPP table
   2735 * @dev:	device for which we do this operation
   2736 * @freq:	Frequency in Hz for this OPP
   2737 * @u_volt:	Voltage in uVolts for this OPP
   2738 *
   2739 * This function adds an opp definition to the opp table and returns status.
   2740 * The opp is made available by default and it can be controlled using
   2741 * dev_pm_opp_enable/disable functions.
   2742 *
   2743 * Return:
   2744 * 0		On success, OR
   2745 *		for duplicate OPPs (same freq and volt) when opp->available
   2746 * -EEXIST	When freqs are the same but volts differ, OR
   2747 *		for duplicate OPPs (same freq and volt) when !opp->available
   2748 * -ENOMEM	Memory allocation failure
   2749 */
   2750int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
   2751{
   2752	struct opp_table *opp_table;
   2753	int ret;
   2754
   2755	opp_table = _add_opp_table(dev, true);
   2756	if (IS_ERR(opp_table))
   2757		return PTR_ERR(opp_table);
   2758
   2759	/* Fix regulator count for dynamic OPPs */
   2760	opp_table->regulator_count = 1;
   2761
   2762	ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
   2763	if (ret)
   2764		dev_pm_opp_put_opp_table(opp_table);
   2765
   2766	return ret;
   2767}
   2768EXPORT_SYMBOL_GPL(dev_pm_opp_add);
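
/*
 * Example (illustrative sketch, not part of this file): registering a single
 * dynamic OPP at 1 GHz / 1.1 V; "my_dev" and both values are hypothetical:
 *
 *	ret = dev_pm_opp_add(my_dev, 1000000000, 1100000);
 *	if (ret)
 *		return ret;
 */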
   2769
   2770/**
   2771 * _opp_set_availability() - helper to set the availability of an opp
   2772 * @dev:		device for which we do this operation
   2773 * @freq:		OPP frequency to modify availability
   2774 * @availability_req:	availability status requested for this opp
   2775 *
   2776 * Set the availability of an OPP; opp_{enable,disable} share the common
   2777 * logic that is isolated here.
   2778 *
   2779 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
   2780 * copy operation, and 0 if no modification was needed or the modification
   2781 * was successful.
   2782 */
   2783static int _opp_set_availability(struct device *dev, unsigned long freq,
   2784				 bool availability_req)
   2785{
   2786	struct opp_table *opp_table;
   2787	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
   2788	int r = 0;
   2789
   2790	/* Find the opp_table */
   2791	opp_table = _find_opp_table(dev);
   2792	if (IS_ERR(opp_table)) {
   2793		r = PTR_ERR(opp_table);
   2794		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
   2795		return r;
   2796	}
   2797
   2798	mutex_lock(&opp_table->lock);
   2799
   2800	/* Do we have the frequency? */
   2801	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
   2802		if (tmp_opp->rate == freq) {
   2803			opp = tmp_opp;
   2804			break;
   2805		}
   2806	}
   2807
   2808	if (IS_ERR(opp)) {
   2809		r = PTR_ERR(opp);
   2810		goto unlock;
   2811	}
   2812
   2813	/* Is update really needed? */
   2814	if (opp->available == availability_req)
   2815		goto unlock;
   2816
   2817	opp->available = availability_req;
   2818
   2819	dev_pm_opp_get(opp);
   2820	mutex_unlock(&opp_table->lock);
   2821
   2822	/* Notify the change of the OPP availability */
   2823	if (availability_req)
   2824		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
   2825					     opp);
   2826	else
   2827		blocking_notifier_call_chain(&opp_table->head,
   2828					     OPP_EVENT_DISABLE, opp);
   2829
   2830	dev_pm_opp_put(opp);
   2831	goto put_table;
   2832
   2833unlock:
   2834	mutex_unlock(&opp_table->lock);
   2835put_table:
   2836	dev_pm_opp_put_opp_table(opp_table);
   2837	return r;
   2838}
   2839
   2840/**
   2841 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
   2842 * @dev:		device for which we do this operation
   2843 * @freq:		OPP frequency to adjust voltage of
   2844 * @u_volt:		new OPP target voltage
   2845 * @u_volt_min:		new OPP min voltage
   2846 * @u_volt_max:		new OPP max voltage
   2847 *
   2848 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
   2849 * copy operation, and 0 if no modification was needed or the modification
   2850 * was successful.
   2851 */
   2852int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
   2853			      unsigned long u_volt, unsigned long u_volt_min,
   2854			      unsigned long u_volt_max)
   2855
   2856{
   2857	struct opp_table *opp_table;
   2858	struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
   2859	int r = 0;
   2860
   2861	/* Find the opp_table */
   2862	opp_table = _find_opp_table(dev);
   2863	if (IS_ERR(opp_table)) {
   2864		r = PTR_ERR(opp_table);
   2865		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
   2866		return r;
   2867	}
   2868
   2869	mutex_lock(&opp_table->lock);
   2870
   2871	/* Do we have the frequency? */
   2872	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
   2873		if (tmp_opp->rate == freq) {
   2874			opp = tmp_opp;
   2875			break;
   2876		}
   2877	}
   2878
   2879	if (IS_ERR(opp)) {
   2880		r = PTR_ERR(opp);
   2881		goto adjust_unlock;
   2882	}
   2883
   2884	/* Is update really needed? */
   2885	if (opp->supplies->u_volt == u_volt)
   2886		goto adjust_unlock;
   2887
   2888	opp->supplies->u_volt = u_volt;
   2889	opp->supplies->u_volt_min = u_volt_min;
   2890	opp->supplies->u_volt_max = u_volt_max;
   2891
   2892	dev_pm_opp_get(opp);
   2893	mutex_unlock(&opp_table->lock);
   2894
   2895	/* Notify the voltage change of the OPP */
   2896	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
   2897				     opp);
   2898
   2899	dev_pm_opp_put(opp);
   2900	goto adjust_put_table;
   2901
   2902adjust_unlock:
   2903	mutex_unlock(&opp_table->lock);
   2904adjust_put_table:
   2905	dev_pm_opp_put_opp_table(opp_table);
   2906	return r;
   2907}
   2908EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
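
/*
 * Example (illustrative sketch, not part of this file): an AVS-style driver
 * might trim the 1 GHz OPP after silicon characterization; all values are
 * hypothetical and in microvolts:
 *
 *	ret = dev_pm_opp_adjust_voltage(my_dev, 1000000000,
 *					1050000, 1000000, 1100000);
 *	if (ret)
 *		return ret;
 */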
   2909
   2910/**
   2911 * dev_pm_opp_enable() - Enable a specific OPP
   2912 * @dev:	device for which we do this operation
   2913 * @freq:	OPP frequency to enable
   2914 *
   2915 * Enables a provided opp. If the operation is valid, this returns 0, else the
   2916 * corresponding error value. It is meant to let users make an OPP available
   2917 * again after it was temporarily made unavailable with dev_pm_opp_disable.
   2918 *
   2919 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
   2920 * copy operation, and 0 if no modification was needed or the modification
   2921 * was successful.
   2922 */
   2923int dev_pm_opp_enable(struct device *dev, unsigned long freq)
   2924{
   2925	return _opp_set_availability(dev, freq, true);
   2926}
   2927EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
   2928
   2929/**
   2930 * dev_pm_opp_disable() - Disable a specific OPP
   2931 * @dev:	device for which we do this operation
   2932 * @freq:	OPP frequency to disable
   2933 *
   2934 * Disables a provided opp. If the operation is valid, this returns
   2935 * 0, else the corresponding error value. It is meant as a temporary
   2936 * control, letting users make an OPP unavailable until the circumstances
   2937 * are right to make it available again (with a call to dev_pm_opp_enable).
   2938 *
   2939 * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the
   2940 * copy operation, and 0 if no modification was needed or the modification
   2941 * was successful.
   2942 */
   2943int dev_pm_opp_disable(struct device *dev, unsigned long freq)
   2944{
   2945	return _opp_set_availability(dev, freq, false);
   2946}
   2947EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
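
/*
 * Example (illustrative sketch, not part of this file): temporarily fencing
 * off an OPP, e.g. on a thermal trip, and restoring it later; "my_dev" and
 * the frequency are hypothetical:
 *
 *	dev_pm_opp_disable(my_dev, 1000000000);
 *	...
 *	dev_pm_opp_enable(my_dev, 1000000000);
 */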
   2948
   2949/**
   2950 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
   2951 * @dev:	Device for which notifier needs to be registered
   2952 * @nb:		Notifier block to be registered
   2953 *
   2954 * Return: 0 on success or a negative error value.
   2955 */
   2956int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
   2957{
   2958	struct opp_table *opp_table;
   2959	int ret;
   2960
   2961	opp_table = _find_opp_table(dev);
   2962	if (IS_ERR(opp_table))
   2963		return PTR_ERR(opp_table);
   2964
   2965	ret = blocking_notifier_chain_register(&opp_table->head, nb);
   2966
   2967	dev_pm_opp_put_opp_table(opp_table);
   2968
   2969	return ret;
   2970}
   2971EXPORT_SYMBOL(dev_pm_opp_register_notifier);
   2972
   2973/**
   2974 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
   2975 * @dev:	Device for which notifier needs to be unregistered
   2976 * @nb:		Notifier block to be unregistered
   2977 *
   2978 * Return: 0 on success or a negative error value.
   2979 */
   2980int dev_pm_opp_unregister_notifier(struct device *dev,
   2981				   struct notifier_block *nb)
   2982{
   2983	struct opp_table *opp_table;
   2984	int ret;
   2985
   2986	opp_table = _find_opp_table(dev);
   2987	if (IS_ERR(opp_table))
   2988		return PTR_ERR(opp_table);
   2989
   2990	ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
   2991
   2992	dev_pm_opp_put_opp_table(opp_table);
   2993
   2994	return ret;
   2995}
   2996EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
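
/*
 * Example (illustrative sketch, not part of this file): a notifier that logs
 * availability changes; "my_opp_notify", "my_nb" and "my_dev" are
 * hypothetical:
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		struct dev_pm_opp *opp = data;
 *
 *		if (event == OPP_EVENT_ENABLE || event == OPP_EVENT_DISABLE)
 *			pr_debug("OPP %lu availability changed\n",
 *				 dev_pm_opp_get_freq(opp));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_opp_notify };
 *
 *	ret = dev_pm_opp_register_notifier(my_dev, &my_nb);
 */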
   2997
   2998/**
   2999 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
   3000 * @dev:	device pointer used to lookup OPP table.
   3001 *
   3002 * Free both OPPs created using static entries present in DT and the
   3003 * dynamically added entries.
   3004 */
   3005void dev_pm_opp_remove_table(struct device *dev)
   3006{
   3007	struct opp_table *opp_table;
   3008
   3009	/* Check for existing table for 'dev' */
   3010	opp_table = _find_opp_table(dev);
   3011	if (IS_ERR(opp_table)) {
   3012		int error = PTR_ERR(opp_table);
   3013
   3014		if (error != -ENODEV)
   3015			WARN(1, "%s: opp_table: %d\n",
   3016			     IS_ERR_OR_NULL(dev) ?
   3017					"Invalid device" : dev_name(dev),
   3018			     error);
   3019		return;
   3020	}
   3021
   3022	/*
   3023	 * Drop the extra reference only if the OPP table was successfully added
   3024	 * with dev_pm_opp_of_add_table() earlier.
   3025	 */
   3026	if (_opp_remove_all_static(opp_table))
   3027		dev_pm_opp_put_opp_table(opp_table);
   3028
   3029	/* Drop reference taken by _find_opp_table() */
   3030	dev_pm_opp_put_opp_table(opp_table);
   3031}
   3032EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
   3033
   3034/**
   3035 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
   3036 * @dev:	device for which we do this operation
   3037 *
   3038 * Sync voltage state of the OPP table regulators.
   3039 *
   3040 * Return: 0 on success or a negative error value.
   3041 */
   3042int dev_pm_opp_sync_regulators(struct device *dev)
   3043{
   3044	struct opp_table *opp_table;
   3045	struct regulator *reg;
   3046	int i, ret = 0;
   3047
   3048	/* Device may not have OPP table */
   3049	opp_table = _find_opp_table(dev);
   3050	if (IS_ERR(opp_table))
   3051		return 0;
   3052
   3053	/* Regulator may not be required for the device */
   3054	if (unlikely(!opp_table->regulators))
   3055		goto put_table;
   3056
   3057	/* Nothing to sync if voltage wasn't changed */
   3058	if (!opp_table->enabled)
   3059		goto put_table;
   3060
   3061	for (i = 0; i < opp_table->regulator_count; i++) {
   3062		reg = opp_table->regulators[i];
   3063		ret = regulator_sync_voltage(reg);
   3064		if (ret)
   3065			break;
   3066	}
   3067put_table:
   3068	/* Drop reference taken by _find_opp_table() */
   3069	dev_pm_opp_put_opp_table(opp_table);
   3070
   3071	return ret;
   3072}
   3073EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
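
/*
 * Example (illustrative sketch, not part of this file): a resume handler may
 * resync the regulators after firmware could have changed them behind the
 * kernel's back; "my_dev" is hypothetical:
 *
 *	static int my_resume(struct device *my_dev)
 *	{
 *		return dev_pm_opp_sync_regulators(my_dev);
 *	}
 */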