cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

devfreq.c (57844B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
      4 *	    for Non-CPU Devices.
      5 *
      6 * Copyright (C) 2011 Samsung Electronics
      7 *	MyungJoo Ham <myungjoo.ham@samsung.com>
      8 */
      9
     10#include <linux/kernel.h>
     11#include <linux/kmod.h>
     12#include <linux/sched.h>
     13#include <linux/debugfs.h>
     14#include <linux/devfreq_cooling.h>
     15#include <linux/errno.h>
     16#include <linux/err.h>
     17#include <linux/init.h>
     18#include <linux/export.h>
     19#include <linux/slab.h>
     20#include <linux/stat.h>
     21#include <linux/pm_opp.h>
     22#include <linux/devfreq.h>
     23#include <linux/workqueue.h>
     24#include <linux/platform_device.h>
     25#include <linux/list.h>
     26#include <linux/printk.h>
     27#include <linux/hrtimer.h>
     28#include <linux/of.h>
     29#include <linux/pm_qos.h>
     30#include <linux/units.h>
     31#include "governor.h"
     32
     33#define CREATE_TRACE_POINTS
     34#include <trace/events/devfreq.h>
     35
     36#define IS_SUPPORTED_FLAG(f, name) ((f & DEVFREQ_GOV_FLAG_##name) ? true : false)
     37#define IS_SUPPORTED_ATTR(f, name) ((f & DEVFREQ_GOV_ATTR_##name) ? true : false)
     38
     39static struct class *devfreq_class;
     40static struct dentry *devfreq_debugfs;
     41
     42/*
     43 * devfreq core provides delayed work based load monitoring helper
     44 * functions. Governors can use these or can implement their own
     45 * monitoring mechanism.
     46 */
     47static struct workqueue_struct *devfreq_wq;
     48
     49/* The list of all device-devfreq governors */
     50static LIST_HEAD(devfreq_governor_list);
     51/* The list of all device-devfreq */
     52static LIST_HEAD(devfreq_list);
     53static DEFINE_MUTEX(devfreq_list_lock);
     54
     55static const char timer_name[][DEVFREQ_NAME_LEN] = {
     56	[DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" },
     57	[DEVFREQ_TIMER_DELAYED] = { "delayed" },
     58};
     59
     60/**
     61 * find_device_devfreq() - find devfreq struct using device pointer
     62 * @dev:	device pointer used to lookup device devfreq.
     63 *
     64 * Search the list of device devfreqs and return the matched device's
     65 * devfreq info. devfreq_list_lock should be held by the caller.
     66 */
     67static struct devfreq *find_device_devfreq(struct device *dev)
     68{
     69	struct devfreq *tmp_devfreq;
     70
     71	lockdep_assert_held(&devfreq_list_lock);
     72
     73	if (IS_ERR_OR_NULL(dev)) {
     74		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
     75		return ERR_PTR(-EINVAL);
     76	}
     77
     78	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
     79		if (tmp_devfreq->dev.parent == dev)
     80			return tmp_devfreq;
     81	}
     82
     83	return ERR_PTR(-ENODEV);
     84}
     85
     86static unsigned long find_available_min_freq(struct devfreq *devfreq)
     87{
     88	struct dev_pm_opp *opp;
     89	unsigned long min_freq = 0;
     90
     91	opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
     92	if (IS_ERR(opp))
     93		min_freq = 0;
     94	else
     95		dev_pm_opp_put(opp);
     96
     97	return min_freq;
     98}
     99
    100static unsigned long find_available_max_freq(struct devfreq *devfreq)
    101{
    102	struct dev_pm_opp *opp;
    103	unsigned long max_freq = ULONG_MAX;
    104
    105	opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
    106	if (IS_ERR(opp))
    107		max_freq = 0;
    108	else
    109		dev_pm_opp_put(opp);
    110
    111	return max_freq;
    112}
    113
    114/**
    115 * devfreq_get_freq_range() - Get the current freq range
    116 * @devfreq:	the devfreq instance
    117 * @min_freq:	the min frequency
    118 * @max_freq:	the max frequency
    119 *
    120 * This takes into consideration all constraints.
    121 */
    122void devfreq_get_freq_range(struct devfreq *devfreq,
    123			    unsigned long *min_freq,
    124			    unsigned long *max_freq)
    125{
    126	unsigned long *freq_table = devfreq->freq_table;
    127	s32 qos_min_freq, qos_max_freq;
    128
    129	lockdep_assert_held(&devfreq->lock);
    130
    131	/*
    132	 * Initialize minimum/maximum frequency from freq table.
    133	 * The devfreq drivers can initialize this in either ascending or
    134	 * descending order and devfreq core supports both.
    135	 */
    136	if (freq_table[0] < freq_table[devfreq->max_state - 1]) {
    137		*min_freq = freq_table[0];
    138		*max_freq = freq_table[devfreq->max_state - 1];
    139	} else {
    140		*min_freq = freq_table[devfreq->max_state - 1];
    141		*max_freq = freq_table[0];
    142	}
    143
    144	/* Apply constraints from PM QoS */
    145	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
    146					     DEV_PM_QOS_MIN_FREQUENCY);
    147	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
    148					     DEV_PM_QOS_MAX_FREQUENCY);
    149	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
    150	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
    151		*max_freq = min(*max_freq,
    152				(unsigned long)HZ_PER_KHZ * qos_max_freq);
    153
    154	/* Apply constraints from OPP interface */
    155	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
    156	*max_freq = min(*max_freq, devfreq->scaling_max_freq);
    157
    158	if (*min_freq > *max_freq)
    159		*min_freq = *max_freq;
    160}
    161EXPORT_SYMBOL(devfreq_get_freq_range);
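
/*
 * Illustrative sketch (hypothetical sample_* governor code, not part of this
 * file): a governor's ->get_target_freq() callback may consult
 * devfreq_get_freq_range() to pick a frequency inside the combined PM QoS and
 * OPP limits; the declaration lives in the local "governor.h".
 */
static int sample_get_target_freq(struct devfreq *devfreq, unsigned long *freq)
{
	unsigned long min_freq, max_freq;

	/* The devfreq core holds devfreq->lock while calling this. */
	devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
	*freq = max_freq;	/* trivial policy: request the highest allowed rate */

	return 0;
}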
    162
    163/**
    164 * devfreq_get_freq_level() - Lookup freq_table for the frequency
    165 * @devfreq:	the devfreq instance
    166 * @freq:	the target frequency
    167 */
    168static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
    169{
    170	int lev;
    171
    172	for (lev = 0; lev < devfreq->max_state; lev++)
    173		if (freq == devfreq->freq_table[lev])
    174			return lev;
    175
    176	return -EINVAL;
    177}
    178
    179static int set_freq_table(struct devfreq *devfreq)
    180{
    181	struct dev_pm_opp *opp;
    182	unsigned long freq;
    183	int i, count;
    184
    185	/* Initialize the freq_table from OPP table */
    186	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
    187	if (count <= 0)
    188		return -EINVAL;
    189
    190	devfreq->max_state = count;
    191	devfreq->freq_table = devm_kcalloc(devfreq->dev.parent,
    192					   devfreq->max_state,
    193					   sizeof(*devfreq->freq_table),
    194					   GFP_KERNEL);
    195	if (!devfreq->freq_table)
    196		return -ENOMEM;
    197
    198	for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) {
    199		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
    200		if (IS_ERR(opp)) {
    201			devm_kfree(devfreq->dev.parent, devfreq->freq_table);
    202			return PTR_ERR(opp);
    203		}
    204		dev_pm_opp_put(opp);
    205		devfreq->freq_table[i] = freq;
    206	}
    207
    208	return 0;
    209}
    210
    211/**
    212 * devfreq_update_status() - Update statistics of devfreq behavior
    213 * @devfreq:	the devfreq instance
    214 * @freq:	the update target frequency
    215 */
    216int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
    217{
    218	int lev, prev_lev, ret = 0;
    219	u64 cur_time;
    220
    221	lockdep_assert_held(&devfreq->lock);
    222	cur_time = get_jiffies_64();
    223
    224	/* Immediately exit if previous_freq is not initialized yet. */
    225	if (!devfreq->previous_freq)
    226		goto out;
    227
    228	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
    229	if (prev_lev < 0) {
    230		ret = prev_lev;
    231		goto out;
    232	}
    233
    234	devfreq->stats.time_in_state[prev_lev] +=
    235			cur_time - devfreq->stats.last_update;
    236
    237	lev = devfreq_get_freq_level(devfreq, freq);
    238	if (lev < 0) {
    239		ret = lev;
    240		goto out;
    241	}
    242
    243	if (lev != prev_lev) {
    244		devfreq->stats.trans_table[
    245			(prev_lev * devfreq->max_state) + lev]++;
    246		devfreq->stats.total_trans++;
    247	}
    248
    249out:
    250	devfreq->stats.last_update = cur_time;
    251	return ret;
    252}
    253EXPORT_SYMBOL(devfreq_update_status);
    254
    255/**
    256 * find_devfreq_governor() - find devfreq governor from name
    257 * @name:	name of the governor
    258 *
    259 * Search the list of devfreq governors and return the matched
    260 * governor's pointer. devfreq_list_lock should be held by the caller.
    261 */
    262static struct devfreq_governor *find_devfreq_governor(const char *name)
    263{
    264	struct devfreq_governor *tmp_governor;
    265
    266	lockdep_assert_held(&devfreq_list_lock);
    267
    268	if (IS_ERR_OR_NULL(name)) {
    269		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
    270		return ERR_PTR(-EINVAL);
    271	}
    272
    273	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
    274		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
    275			return tmp_governor;
    276	}
    277
    278	return ERR_PTR(-ENODEV);
    279}
    280
    281/**
    282 * try_then_request_governor() - Try to find the governor and request the
    283 *                               module if it is not found.
    284 * @name:	name of the governor
    285 *
    286 * Search the list of devfreq governors; if the governor is not found,
    287 * request its module and try again. This can happen when both the governor
    288 * driver and the driver that calls devfreq_add_device are built as modules.
    289 * devfreq_list_lock should be held by the caller. Returns the matched
    290 * governor's pointer or an error pointer.
    291 */
    292static struct devfreq_governor *try_then_request_governor(const char *name)
    293{
    294	struct devfreq_governor *governor;
    295	int err = 0;
    296
    297	lockdep_assert_held(&devfreq_list_lock);
    298
    299	if (IS_ERR_OR_NULL(name)) {
    300		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
    301		return ERR_PTR(-EINVAL);
    302	}
    303
    304	governor = find_devfreq_governor(name);
    305	if (IS_ERR(governor)) {
    306		mutex_unlock(&devfreq_list_lock);
    307
    308		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
    309			     DEVFREQ_NAME_LEN))
    310			err = request_module("governor_%s", "simpleondemand");
    311		else
    312			err = request_module("governor_%s", name);
    313		/* Restore previous state before return */
    314		mutex_lock(&devfreq_list_lock);
    315		if (err)
    316			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
    317
    318		governor = find_devfreq_governor(name);
    319	}
    320
    321	return governor;
    322}
    323
    324static int devfreq_notify_transition(struct devfreq *devfreq,
    325		struct devfreq_freqs *freqs, unsigned int state)
    326{
    327	if (!devfreq)
    328		return -EINVAL;
    329
    330	switch (state) {
    331	case DEVFREQ_PRECHANGE:
    332		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
    333				DEVFREQ_PRECHANGE, freqs);
    334		break;
    335
    336	case DEVFREQ_POSTCHANGE:
    337		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
    338				DEVFREQ_POSTCHANGE, freqs);
    339		break;
    340	default:
    341		return -EINVAL;
    342	}
    343
    344	return 0;
    345}
    346
    347static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
    348			      u32 flags)
    349{
    350	struct devfreq_freqs freqs;
    351	unsigned long cur_freq;
    352	int err = 0;
    353
    354	if (devfreq->profile->get_cur_freq)
    355		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
    356	else
    357		cur_freq = devfreq->previous_freq;
    358
    359	freqs.old = cur_freq;
    360	freqs.new = new_freq;
    361	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
    362
    363	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
    364	if (err) {
    365		freqs.new = cur_freq;
    366		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
    367		return err;
    368	}
    369
    370	/*
    371	 * Emit the devfreq_frequency trace point between DEVFREQ_PRECHANGE
    372	 * and DEVFREQ_POSTCHANGE so that the frequency change order between a
    373	 * devfreq device and its passive devfreq device is shown correctly.
    374	 */
    375	if (trace_devfreq_frequency_enabled() && new_freq != cur_freq)
    376		trace_devfreq_frequency(devfreq, new_freq, cur_freq);
    377
    378	freqs.new = new_freq;
    379	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
    380
    381	if (devfreq_update_status(devfreq, new_freq))
    382		dev_warn(&devfreq->dev,
    383			 "Couldn't update frequency transition information.\n");
    384
    385	devfreq->previous_freq = new_freq;
    386
    387	if (devfreq->suspend_freq)
    388		devfreq->resume_freq = new_freq;
    389
    390	return err;
    391}
    392
    393/**
    394 * devfreq_update_target() - Reevaluate the device and configure frequency
    395 *			   in the final stage.
    396 * @devfreq:	the devfreq instance.
    397 * @freq:	the new frequency of parent device. This argument
    398 *		is only used for devfreq device using passive governor.
    399 *
    400 * Note: Lock devfreq->lock before calling devfreq_update_target. This function
    401 *	 should only be used by update_devfreq() and devfreq governors.
    402 */
    403int devfreq_update_target(struct devfreq *devfreq, unsigned long freq)
    404{
    405	unsigned long min_freq, max_freq;
    406	int err = 0;
    407	u32 flags = 0;
    408
    409	lockdep_assert_held(&devfreq->lock);
    410
    411	if (!devfreq->governor)
    412		return -EINVAL;
    413
    414	/* Reevaluate the proper frequency */
    415	err = devfreq->governor->get_target_freq(devfreq, &freq);
    416	if (err)
    417		return err;
    418	devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
    419
    420	if (freq < min_freq) {
    421		freq = min_freq;
    422		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
    423	}
    424	if (freq > max_freq) {
    425		freq = max_freq;
    426		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
    427	}
    428
    429	return devfreq_set_target(devfreq, freq, flags);
    430}
    431EXPORT_SYMBOL(devfreq_update_target);
    432
    433/* Load monitoring helper functions for governors use */
    434
    435/**
    436 * update_devfreq() - Reevaluate the device and configure frequency.
    437 * @devfreq:	the devfreq instance.
    438 *
    439 * Note: Lock devfreq->lock before calling update_devfreq().
    440 *	 This function is exported for governors.
    441 */
    442int update_devfreq(struct devfreq *devfreq)
    443{
    444	return devfreq_update_target(devfreq, 0L);
    445}
    446EXPORT_SYMBOL(update_devfreq);
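
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * governor reevaluating its device from a notifier or interrupt path must
 * take devfreq->lock around update_devfreq(), as the kernel-doc above notes.
 */
static void sample_governor_kick(struct devfreq *devfreq)
{
	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	if (err)
		dev_err(&devfreq->dev, "failed to update frequency (%d)\n", err);
}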
    447
    448/**
    449 * devfreq_monitor() - Periodically poll devfreq objects.
    450 * @work:	the work struct used to run devfreq_monitor periodically.
    451 *
    452 */
    453static void devfreq_monitor(struct work_struct *work)
    454{
    455	int err;
    456	struct devfreq *devfreq = container_of(work,
    457					struct devfreq, work.work);
    458
    459	mutex_lock(&devfreq->lock);
    460	err = update_devfreq(devfreq);
    461	if (err)
    462		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
    463
    464	queue_delayed_work(devfreq_wq, &devfreq->work,
    465				msecs_to_jiffies(devfreq->profile->polling_ms));
    466	mutex_unlock(&devfreq->lock);
    467
    468	trace_devfreq_monitor(devfreq);
    469}
    470
    471/**
    472 * devfreq_monitor_start() - Start load monitoring of devfreq instance
    473 * @devfreq:	the devfreq instance.
    474 *
    475 * Helper function for starting devfreq device load monitoring. By
    476 * default, delayed-work-based monitoring is supported. This function is
    477 * to be called from the governor in response to the DEVFREQ_GOV_START
    478 * event when the device is added to the devfreq framework.
    479 */
    480void devfreq_monitor_start(struct devfreq *devfreq)
    481{
    482	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
    483		return;
    484
    485	switch (devfreq->profile->timer) {
    486	case DEVFREQ_TIMER_DEFERRABLE:
    487		INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
    488		break;
    489	case DEVFREQ_TIMER_DELAYED:
    490		INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
    491		break;
    492	default:
    493		return;
    494	}
    495
    496	if (devfreq->profile->polling_ms)
    497		queue_delayed_work(devfreq_wq, &devfreq->work,
    498			msecs_to_jiffies(devfreq->profile->polling_ms));
    499}
    500EXPORT_SYMBOL(devfreq_monitor_start);
    501
    502/**
    503 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
    504 * @devfreq:	the devfreq instance.
    505 *
    506 * Helper function to stop devfreq device load monitoring. This function is
    507 * to be called from the governor in response to the DEVFREQ_GOV_STOP
    508 * event when the device is removed from the devfreq framework.
    509 */
    510void devfreq_monitor_stop(struct devfreq *devfreq)
    511{
    512	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
    513		return;
    514
    515	cancel_delayed_work_sync(&devfreq->work);
    516}
    517EXPORT_SYMBOL(devfreq_monitor_stop);
    518
    519/**
    520 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
    521 * @devfreq:	the devfreq instance.
    522 *
    523 * Helper function to suspend devfreq device load monitoring. This function is
    524 * to be called from the governor in response to the DEVFREQ_GOV_SUSPEND
    525 * event or when the polling interval is set to zero.
    526 *
    527 * Note: Though this function is the same as devfreq_monitor_stop(), it is
    528 * intentionally kept separate to provide hooks for collecting
    529 * transition statistics.
    530 */
    531void devfreq_monitor_suspend(struct devfreq *devfreq)
    532{
    533	mutex_lock(&devfreq->lock);
    534	if (devfreq->stop_polling) {
    535		mutex_unlock(&devfreq->lock);
    536		return;
    537	}
    538
    539	devfreq_update_status(devfreq, devfreq->previous_freq);
    540	devfreq->stop_polling = true;
    541	mutex_unlock(&devfreq->lock);
    542
    543	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
    544		return;
    545
    546	cancel_delayed_work_sync(&devfreq->work);
    547}
    548EXPORT_SYMBOL(devfreq_monitor_suspend);
    549
    550/**
    551 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
    552 * @devfreq:    the devfreq instance.
    553 *
    554 * Helper function to resume devfreq device load monitoring. This function is
    555 * to be called from the governor in response to the DEVFREQ_GOV_RESUME
    556 * event or when the polling interval is set to non-zero.
    557 */
    558void devfreq_monitor_resume(struct devfreq *devfreq)
    559{
    560	unsigned long freq;
    561
    562	mutex_lock(&devfreq->lock);
    563
    564	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
    565		goto out_update;
    566
    567	if (!devfreq->stop_polling)
    568		goto out;
    569
    570	if (!delayed_work_pending(&devfreq->work) &&
    571			devfreq->profile->polling_ms)
    572		queue_delayed_work(devfreq_wq, &devfreq->work,
    573			msecs_to_jiffies(devfreq->profile->polling_ms));
    574
    575out_update:
    576	devfreq->stats.last_update = get_jiffies_64();
    577	devfreq->stop_polling = false;
    578
    579	if (devfreq->profile->get_cur_freq &&
    580		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
    581		devfreq->previous_freq = freq;
    582
    583out:
    584	mutex_unlock(&devfreq->lock);
    585}
    586EXPORT_SYMBOL(devfreq_monitor_resume);
    587
    588/**
    589 * devfreq_update_interval() - Update device devfreq monitoring interval
    590 * @devfreq:    the devfreq instance.
    591 * @delay:      new polling interval to be set.
    592 *
    593 * Helper function to set a new load monitoring polling interval. This function
    594 * is to be called from the governor in response to the DEVFREQ_GOV_UPDATE_INTERVAL event.
    595 */
    596void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
    597{
    598	unsigned int cur_delay = devfreq->profile->polling_ms;
    599	unsigned int new_delay = *delay;
    600
    601	mutex_lock(&devfreq->lock);
    602	devfreq->profile->polling_ms = new_delay;
    603
    604	if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
    605		goto out;
    606
    607	if (devfreq->stop_polling)
    608		goto out;
    609
    610	/* if new delay is zero, stop polling */
    611	if (!new_delay) {
    612		mutex_unlock(&devfreq->lock);
    613		cancel_delayed_work_sync(&devfreq->work);
    614		return;
    615	}
    616
    617	/* if current delay is zero, start polling with new delay */
    618	if (!cur_delay) {
    619		queue_delayed_work(devfreq_wq, &devfreq->work,
    620			msecs_to_jiffies(devfreq->profile->polling_ms));
    621		goto out;
    622	}
    623
    624	/* if current delay is greater than new delay, restart polling */
    625	if (cur_delay > new_delay) {
    626		mutex_unlock(&devfreq->lock);
    627		cancel_delayed_work_sync(&devfreq->work);
    628		mutex_lock(&devfreq->lock);
    629		if (!devfreq->stop_polling)
    630			queue_delayed_work(devfreq_wq, &devfreq->work,
    631				msecs_to_jiffies(devfreq->profile->polling_ms));
    632	}
    633out:
    634	mutex_unlock(&devfreq->lock);
    635}
    636EXPORT_SYMBOL(devfreq_update_interval);
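
/*
 * Illustrative sketch (hypothetical sample_* governor, not part of this
 * file): a polling governor's event handler typically just maps the governor
 * events onto the monitoring helpers above, with DEVFREQ_GOV_UPDATE_INTERVAL
 * carrying the new interval in @data.
 */
static int sample_governor_event_handler(struct devfreq *devfreq,
					 unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_UPDATE_INTERVAL:
		devfreq_update_interval(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	default:
		break;
	}

	return 0;
}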
    637
    638/**
    639 * devfreq_notifier_call() - Notify that the device frequency requirements
    640 *			     have been changed outside of the devfreq framework.
    641 * @nb:		the notifier_block (supposed to be devfreq->nb)
    642 * @type:	not used
    643 * @devp:	not used
    644 *
    645 * Called by a notifier that uses devfreq->nb.
    646 */
    647static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
    648				 void *devp)
    649{
    650	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
    651	int err = -EINVAL;
    652
    653	mutex_lock(&devfreq->lock);
    654
    655	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
    656	if (!devfreq->scaling_min_freq)
    657		goto out;
    658
    659	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
    660	if (!devfreq->scaling_max_freq) {
    661		devfreq->scaling_max_freq = ULONG_MAX;
    662		goto out;
    663	}
    664
    665	err = update_devfreq(devfreq);
    666
    667out:
    668	mutex_unlock(&devfreq->lock);
    669	if (err)
    670		dev_err(devfreq->dev.parent,
    671			"failed to update frequency from OPP notifier (%d)\n",
    672			err);
    673
    674	return NOTIFY_OK;
    675}
    676
    677/**
    678 * qos_notifier_call() - Common handler for QoS constraints.
    679 * @devfreq:    the devfreq instance.
    680 */
    681static int qos_notifier_call(struct devfreq *devfreq)
    682{
    683	int err;
    684
    685	mutex_lock(&devfreq->lock);
    686	err = update_devfreq(devfreq);
    687	mutex_unlock(&devfreq->lock);
    688	if (err)
    689		dev_err(devfreq->dev.parent,
    690			"failed to update frequency from PM QoS (%d)\n",
    691			err);
    692
    693	return NOTIFY_OK;
    694}
    695
    696/**
    697 * qos_min_notifier_call() - Callback for QoS min_freq changes.
    698 * @nb:		Should be devfreq->nb_min
    699 */
    700static int qos_min_notifier_call(struct notifier_block *nb,
    701					 unsigned long val, void *ptr)
    702{
    703	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
    704}
    705
    706/**
    707 * qos_max_notifier_call() - Callback for QoS max_freq changes.
    708 * @nb:		Should be devfreq->nb_max
    709 */
    710static int qos_max_notifier_call(struct notifier_block *nb,
    711					 unsigned long val, void *ptr)
    712{
    713	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
    714}
    715
    716/**
    717 * devfreq_dev_release() - Callback for struct device to release the device.
    718 * @dev:	the devfreq device
    719 *
    720 * Remove devfreq from the list and release its resources.
    721 */
    722static void devfreq_dev_release(struct device *dev)
    723{
    724	struct devfreq *devfreq = to_devfreq(dev);
    725	int err;
    726
    727	mutex_lock(&devfreq_list_lock);
    728	list_del(&devfreq->node);
    729	mutex_unlock(&devfreq_list_lock);
    730
    731	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
    732					 DEV_PM_QOS_MAX_FREQUENCY);
    733	if (err && err != -ENOENT)
    734		dev_warn(dev->parent,
    735			"Failed to remove max_freq notifier: %d\n", err);
    736	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
    737					 DEV_PM_QOS_MIN_FREQUENCY);
    738	if (err && err != -ENOENT)
    739		dev_warn(dev->parent,
    740			"Failed to remove min_freq notifier: %d\n", err);
    741
    742	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
    743		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
    744		if (err < 0)
    745			dev_warn(dev->parent,
    746				"Failed to remove max_freq request: %d\n", err);
    747	}
    748	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
    749		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
    750		if (err < 0)
    751			dev_warn(dev->parent,
    752				"Failed to remove min_freq request: %d\n", err);
    753	}
    754
    755	if (devfreq->profile->exit)
    756		devfreq->profile->exit(devfreq->dev.parent);
    757
    758	if (devfreq->opp_table)
    759		dev_pm_opp_put_opp_table(devfreq->opp_table);
    760
    761	mutex_destroy(&devfreq->lock);
    762	kfree(devfreq);
    763}
    764
    765static void create_sysfs_files(struct devfreq *devfreq,
    766				const struct devfreq_governor *gov);
    767static void remove_sysfs_files(struct devfreq *devfreq,
    768				const struct devfreq_governor *gov);
    769
    770/**
    771 * devfreq_add_device() - Add devfreq feature to the device
    772 * @dev:	the device to add devfreq feature.
    773 * @profile:	device-specific profile to run devfreq.
    774 * @governor_name:	name of the policy to choose frequency.
    775 * @data:	private data for the governor. The devfreq framework does not
    776 *		touch this value.
    777 */
    778struct devfreq *devfreq_add_device(struct device *dev,
    779				   struct devfreq_dev_profile *profile,
    780				   const char *governor_name,
    781				   void *data)
    782{
    783	struct devfreq *devfreq;
    784	struct devfreq_governor *governor;
    785	unsigned long min_freq, max_freq;
    786	int err = 0;
    787
    788	if (!dev || !profile || !governor_name) {
    789		dev_err(dev, "%s: Invalid parameters.\n", __func__);
    790		return ERR_PTR(-EINVAL);
    791	}
    792
    793	mutex_lock(&devfreq_list_lock);
    794	devfreq = find_device_devfreq(dev);
    795	mutex_unlock(&devfreq_list_lock);
    796	if (!IS_ERR(devfreq)) {
    797		dev_err(dev, "%s: devfreq device already exists!\n",
    798			__func__);
    799		err = -EINVAL;
    800		goto err_out;
    801	}
    802
    803	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
    804	if (!devfreq) {
    805		err = -ENOMEM;
    806		goto err_out;
    807	}
    808
    809	mutex_init(&devfreq->lock);
    810	mutex_lock(&devfreq->lock);
    811	devfreq->dev.parent = dev;
    812	devfreq->dev.class = devfreq_class;
    813	devfreq->dev.release = devfreq_dev_release;
    814	INIT_LIST_HEAD(&devfreq->node);
    815	devfreq->profile = profile;
    816	devfreq->previous_freq = profile->initial_freq;
    817	devfreq->last_status.current_frequency = profile->initial_freq;
    818	devfreq->data = data;
    819	devfreq->nb.notifier_call = devfreq_notifier_call;
    820
    821	if (devfreq->profile->timer < 0
    822		|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
    823		mutex_unlock(&devfreq->lock);
    824		err = -EINVAL;
    825		goto err_dev;
    826	}
    827
    828	if (!devfreq->profile->max_state || !devfreq->profile->freq_table) {
    829		mutex_unlock(&devfreq->lock);
    830		err = set_freq_table(devfreq);
    831		if (err < 0)
    832			goto err_dev;
    833		mutex_lock(&devfreq->lock);
    834	} else {
    835		devfreq->freq_table = devfreq->profile->freq_table;
    836		devfreq->max_state = devfreq->profile->max_state;
    837	}
    838
    839	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
    840	if (!devfreq->scaling_min_freq) {
    841		mutex_unlock(&devfreq->lock);
    842		err = -EINVAL;
    843		goto err_dev;
    844	}
    845
    846	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
    847	if (!devfreq->scaling_max_freq) {
    848		mutex_unlock(&devfreq->lock);
    849		err = -EINVAL;
    850		goto err_dev;
    851	}
    852
    853	devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
    854
    855	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
    856	devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
    857	if (IS_ERR(devfreq->opp_table))
    858		devfreq->opp_table = NULL;
    859
    860	atomic_set(&devfreq->suspend_count, 0);
    861
    862	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
    863	err = device_register(&devfreq->dev);
    864	if (err) {
    865		mutex_unlock(&devfreq->lock);
    866		put_device(&devfreq->dev);
    867		goto err_out;
    868	}
    869
    870	devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
    871			array3_size(sizeof(unsigned int),
    872				    devfreq->max_state,
    873				    devfreq->max_state),
    874			GFP_KERNEL);
    875	if (!devfreq->stats.trans_table) {
    876		mutex_unlock(&devfreq->lock);
    877		err = -ENOMEM;
    878		goto err_devfreq;
    879	}
    880
    881	devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
    882			devfreq->max_state,
    883			sizeof(*devfreq->stats.time_in_state),
    884			GFP_KERNEL);
    885	if (!devfreq->stats.time_in_state) {
    886		mutex_unlock(&devfreq->lock);
    887		err = -ENOMEM;
    888		goto err_devfreq;
    889	}
    890
    891	devfreq->stats.total_trans = 0;
    892	devfreq->stats.last_update = get_jiffies_64();
    893
    894	srcu_init_notifier_head(&devfreq->transition_notifier_list);
    895
    896	mutex_unlock(&devfreq->lock);
    897
    898	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
    899				     DEV_PM_QOS_MIN_FREQUENCY, 0);
    900	if (err < 0)
    901		goto err_devfreq;
    902	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
    903				     DEV_PM_QOS_MAX_FREQUENCY,
    904				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
    905	if (err < 0)
    906		goto err_devfreq;
    907
    908	devfreq->nb_min.notifier_call = qos_min_notifier_call;
    909	err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min,
    910				      DEV_PM_QOS_MIN_FREQUENCY);
    911	if (err)
    912		goto err_devfreq;
    913
    914	devfreq->nb_max.notifier_call = qos_max_notifier_call;
    915	err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max,
    916				      DEV_PM_QOS_MAX_FREQUENCY);
    917	if (err)
    918		goto err_devfreq;
    919
    920	mutex_lock(&devfreq_list_lock);
    921
    922	governor = try_then_request_governor(governor_name);
    923	if (IS_ERR(governor)) {
    924		dev_err(dev, "%s: Unable to find governor for the device\n",
    925			__func__);
    926		err = PTR_ERR(governor);
    927		goto err_init;
    928	}
    929
    930	devfreq->governor = governor;
    931	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
    932						NULL);
    933	if (err) {
    934		dev_err_probe(dev, err,
    935			"%s: Unable to start governor for the device\n",
    936			 __func__);
    937		goto err_init;
    938	}
    939	create_sysfs_files(devfreq, devfreq->governor);
    940
    941	list_add(&devfreq->node, &devfreq_list);
    942
    943	mutex_unlock(&devfreq_list_lock);
    944
    945	if (devfreq->profile->is_cooling_device) {
    946		devfreq->cdev = devfreq_cooling_em_register(devfreq, NULL);
    947		if (IS_ERR(devfreq->cdev))
    948			devfreq->cdev = NULL;
    949	}
    950
    951	return devfreq;
    952
    953err_init:
    954	mutex_unlock(&devfreq_list_lock);
    955err_devfreq:
    956	devfreq_remove_device(devfreq);
    957	devfreq = NULL;
    958err_dev:
    959	kfree(devfreq);
    960err_out:
    961	return ERR_PTR(err);
    962}
    963EXPORT_SYMBOL(devfreq_add_device);
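
/*
 * Illustrative sketch (hypothetical sample_* driver, not part of this file):
 * a typical consumer fills in a devfreq_dev_profile and registers it with
 * devfreq_add_device() (or the devm_ variant below). It assumes the device
 * has an OPP table and the usual <linux/devfreq.h>, <linux/pm_opp.h> and
 * <linux/platform_device.h> includes; the simple_ondemand governor also
 * needs a real ->get_dev_status() implementation.
 */
static int sample_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;

	/* Map the requested rate onto a valid OPP, honouring the LUB/GLB flag. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	return dev_pm_opp_set_rate(dev, *freq);
}

static int sample_get_dev_status(struct device *dev,
				 struct devfreq_dev_status *stat)
{
	/* A real driver would read hardware load counters here. */
	stat->busy_time = 0;
	stat->total_time = 0;

	return 0;
}

static struct devfreq_dev_profile sample_profile = {
	.polling_ms	= 100,
	.timer		= DEVFREQ_TIMER_DEFERRABLE,
	.target		= sample_target,
	.get_dev_status	= sample_get_dev_status,
};

static int sample_probe(struct platform_device *pdev)
{
	struct devfreq *df;

	df = devfreq_add_device(&pdev->dev, &sample_profile,
				DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);

	return PTR_ERR_OR_ZERO(df);
}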
    964
    965/**
    966 * devfreq_remove_device() - Remove devfreq feature from a device.
    967 * @devfreq:	the devfreq instance to be removed
    968 *
    969 * The opposite of devfreq_add_device().
    970 */
    971int devfreq_remove_device(struct devfreq *devfreq)
    972{
    973	if (!devfreq)
    974		return -EINVAL;
    975
    976	devfreq_cooling_unregister(devfreq->cdev);
    977
    978	if (devfreq->governor) {
    979		devfreq->governor->event_handler(devfreq,
    980						 DEVFREQ_GOV_STOP, NULL);
    981		remove_sysfs_files(devfreq, devfreq->governor);
    982	}
    983
    984	device_unregister(&devfreq->dev);
    985
    986	return 0;
    987}
    988EXPORT_SYMBOL(devfreq_remove_device);
    989
    990static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
    991{
    992	struct devfreq **r = res;
    993
    994	if (WARN_ON(!r || !*r))
    995		return 0;
    996
    997	return *r == data;
    998}
    999
   1000static void devm_devfreq_dev_release(struct device *dev, void *res)
   1001{
   1002	devfreq_remove_device(*(struct devfreq **)res);
   1003}
   1004
   1005/**
   1006 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
   1007 * @dev:	the device to add devfreq feature.
   1008 * @profile:	device-specific profile to run devfreq.
   1009 * @governor_name:	name of the policy to choose frequency.
   1010 * @data:	private data for the governor. The devfreq framework does not
   1011 *		touch this value.
   1012 *
   1013 * This function automatically manages the memory of the devfreq device using
   1014 * device resource management, which simplifies freeing the devfreq device's
   1015 * memory.
   1016 */
   1017struct devfreq *devm_devfreq_add_device(struct device *dev,
   1018					struct devfreq_dev_profile *profile,
   1019					const char *governor_name,
   1020					void *data)
   1021{
   1022	struct devfreq **ptr, *devfreq;
   1023
   1024	ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
   1025	if (!ptr)
   1026		return ERR_PTR(-ENOMEM);
   1027
   1028	devfreq = devfreq_add_device(dev, profile, governor_name, data);
   1029	if (IS_ERR(devfreq)) {
   1030		devres_free(ptr);
   1031		return devfreq;
   1032	}
   1033
   1034	*ptr = devfreq;
   1035	devres_add(dev, ptr);
   1036
   1037	return devfreq;
   1038}
   1039EXPORT_SYMBOL(devm_devfreq_add_device);
   1040
   1041#ifdef CONFIG_OF
   1042/*
   1043 * devfreq_get_devfreq_by_node - Get the devfreq device from devicetree
   1044 * @node - pointer to device_node
   1045 *
   1046 * return the instance of devfreq device
   1047 */
   1048struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
   1049{
   1050	struct devfreq *devfreq;
   1051
   1052	if (!node)
   1053		return ERR_PTR(-EINVAL);
   1054
   1055	mutex_lock(&devfreq_list_lock);
   1056	list_for_each_entry(devfreq, &devfreq_list, node) {
   1057		if (devfreq->dev.parent
   1058			&& devfreq->dev.parent->of_node == node) {
   1059			mutex_unlock(&devfreq_list_lock);
   1060			return devfreq;
   1061		}
   1062	}
   1063	mutex_unlock(&devfreq_list_lock);
   1064
   1065	return ERR_PTR(-ENODEV);
   1066}
   1067
   1068/*
   1069 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
   1070 * @dev - instance to the given device
   1071 * @phandle_name - name of property holding a phandle value
   1072 * @index - index into list of devfreq
   1073 *
   1074 * return the instance of devfreq device
   1075 */
   1076struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
   1077					const char *phandle_name, int index)
   1078{
   1079	struct device_node *node;
   1080	struct devfreq *devfreq;
   1081
   1082	if (!dev || !phandle_name)
   1083		return ERR_PTR(-EINVAL);
   1084
   1085	if (!dev->of_node)
   1086		return ERR_PTR(-EINVAL);
   1087
   1088	node = of_parse_phandle(dev->of_node, phandle_name, index);
   1089	if (!node)
   1090		return ERR_PTR(-ENODEV);
   1091
   1092	devfreq = devfreq_get_devfreq_by_node(node);
   1093	of_node_put(node);
   1094
   1095	return devfreq;
   1096}
   1097
   1098#else
   1099struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
   1100{
   1101	return ERR_PTR(-ENODEV);
   1102}
   1103
   1104struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
   1105					const char *phandle_name, int index)
   1106{
   1107	return ERR_PTR(-ENODEV);
   1108}
   1109#endif /* CONFIG_OF */
   1110EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node);
   1111EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
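
/*
 * Illustrative sketch (hypothetical, not part of this file): a consumer can
 * follow a phandle in its own DT node to obtain another device's devfreq
 * instance, e.g. for use with the passive governor. The "devfreq" property
 * name is only an assumption for this example.
 */
static struct devfreq *sample_get_parent_devfreq(struct device *dev)
{
	/* Returns an ERR_PTR() (e.g. -ENODEV) when no matching devfreq exists. */
	return devfreq_get_devfreq_by_phandle(dev, "devfreq", 0);
}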
   1112
   1113/**
   1114 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
   1115 * @dev:	the device from which to remove devfreq feature.
   1116 * @devfreq:	the devfreq instance to be removed
   1117 */
   1118void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
   1119{
   1120	WARN_ON(devres_release(dev, devm_devfreq_dev_release,
   1121			       devm_devfreq_dev_match, devfreq));
   1122}
   1123EXPORT_SYMBOL(devm_devfreq_remove_device);
   1124
   1125/**
   1126 * devfreq_suspend_device() - Suspend devfreq of a device.
   1127 * @devfreq: the devfreq instance to be suspended
   1128 *
   1129 * This function is intended to be called by the pm callbacks
   1130 * (e.g., runtime_suspend, suspend) of the device driver that
   1131 * holds the devfreq.
   1132 */
   1133int devfreq_suspend_device(struct devfreq *devfreq)
   1134{
   1135	int ret;
   1136
   1137	if (!devfreq)
   1138		return -EINVAL;
   1139
   1140	if (atomic_inc_return(&devfreq->suspend_count) > 1)
   1141		return 0;
   1142
   1143	if (devfreq->governor) {
   1144		ret = devfreq->governor->event_handler(devfreq,
   1145					DEVFREQ_GOV_SUSPEND, NULL);
   1146		if (ret)
   1147			return ret;
   1148	}
   1149
   1150	if (devfreq->suspend_freq) {
   1151		mutex_lock(&devfreq->lock);
   1152		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
   1153		mutex_unlock(&devfreq->lock);
   1154		if (ret)
   1155			return ret;
   1156	}
   1157
   1158	return 0;
   1159}
   1160EXPORT_SYMBOL(devfreq_suspend_device);
   1161
   1162/**
   1163 * devfreq_resume_device() - Resume devfreq of a device.
   1164 * @devfreq: the devfreq instance to be resumed
   1165 *
   1166 * This function is intended to be called by the pm callbacks
   1167 * (e.g., runtime_resume, resume) of the device driver that
   1168 * holds the devfreq.
   1169 */
   1170int devfreq_resume_device(struct devfreq *devfreq)
   1171{
   1172	int ret;
   1173
   1174	if (!devfreq)
   1175		return -EINVAL;
   1176
   1177	if (atomic_dec_return(&devfreq->suspend_count) >= 1)
   1178		return 0;
   1179
   1180	if (devfreq->resume_freq) {
   1181		mutex_lock(&devfreq->lock);
   1182		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
   1183		mutex_unlock(&devfreq->lock);
   1184		if (ret)
   1185			return ret;
   1186	}
   1187
   1188	if (devfreq->governor) {
   1189		ret = devfreq->governor->event_handler(devfreq,
   1190					DEVFREQ_GOV_RESUME, NULL);
   1191		if (ret)
   1192			return ret;
   1193	}
   1194
   1195	return 0;
   1196}
   1197EXPORT_SYMBOL(devfreq_resume_device);
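
/*
 * Illustrative sketch (hypothetical sample_* driver, not part of this file):
 * devfreq_suspend_device()/devfreq_resume_device() are intended to be called
 * from the driver's PM callbacks, as the kernel-doc above describes.
 */
struct sample_drvdata {
	struct devfreq *devfreq;
};

static int sample_runtime_suspend(struct device *dev)
{
	struct sample_drvdata *ddata = dev_get_drvdata(dev);

	return devfreq_suspend_device(ddata->devfreq);
}

static int sample_runtime_resume(struct device *dev)
{
	struct sample_drvdata *ddata = dev_get_drvdata(dev);

	return devfreq_resume_device(ddata->devfreq);
}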
   1198
   1199/**
   1200 * devfreq_suspend() - Suspend devfreq governors and devices
   1201 *
   1202 * Called during system-wide suspend/hibernate cycles to suspend governors
   1203 * and devices while preserving their state for resume. On some platforms the
   1204 * devfreq device must be restored to a precise state (frequency) after resume
   1205 * in order to provide a fully operational setup.
   1206 */
   1207void devfreq_suspend(void)
   1208{
   1209	struct devfreq *devfreq;
   1210	int ret;
   1211
   1212	mutex_lock(&devfreq_list_lock);
   1213	list_for_each_entry(devfreq, &devfreq_list, node) {
   1214		ret = devfreq_suspend_device(devfreq);
   1215		if (ret)
   1216			dev_err(&devfreq->dev,
   1217				"failed to suspend devfreq device\n");
   1218	}
   1219	mutex_unlock(&devfreq_list_lock);
   1220}
   1221
   1222/**
   1223 * devfreq_resume() - Resume devfreq governors and devices
   1224 *
   1225 * Called during system-wide suspend/hibernate cycles to resume governors and
   1226 * devices that were suspended with devfreq_suspend().
   1227 */
   1228void devfreq_resume(void)
   1229{
   1230	struct devfreq *devfreq;
   1231	int ret;
   1232
   1233	mutex_lock(&devfreq_list_lock);
   1234	list_for_each_entry(devfreq, &devfreq_list, node) {
   1235		ret = devfreq_resume_device(devfreq);
   1236		if (ret)
   1237			dev_warn(&devfreq->dev,
   1238				 "failed to resume devfreq device\n");
   1239	}
   1240	mutex_unlock(&devfreq_list_lock);
   1241}
   1242
   1243/**
   1244 * devfreq_add_governor() - Add devfreq governor
   1245 * @governor:	the devfreq governor to be added
   1246 */
   1247int devfreq_add_governor(struct devfreq_governor *governor)
   1248{
   1249	struct devfreq_governor *g;
   1250	struct devfreq *devfreq;
   1251	int err = 0;
   1252
   1253	if (!governor) {
   1254		pr_err("%s: Invalid parameters.\n", __func__);
   1255		return -EINVAL;
   1256	}
   1257
   1258	mutex_lock(&devfreq_list_lock);
   1259	g = find_devfreq_governor(governor->name);
   1260	if (!IS_ERR(g)) {
   1261		pr_err("%s: governor %s already registered\n", __func__,
   1262		       g->name);
   1263		err = -EINVAL;
   1264		goto err_out;
   1265	}
   1266
   1267	list_add(&governor->node, &devfreq_governor_list);
   1268
   1269	list_for_each_entry(devfreq, &devfreq_list, node) {
   1270		int ret = 0;
   1271		struct device *dev = devfreq->dev.parent;
   1272
   1273		if (!strncmp(devfreq->governor->name, governor->name,
   1274			     DEVFREQ_NAME_LEN)) {
   1275			/* The following should never occur */
   1276			if (devfreq->governor) {
   1277				dev_warn(dev,
   1278					 "%s: Governor %s already present\n",
   1279					 __func__, devfreq->governor->name);
   1280				ret = devfreq->governor->event_handler(devfreq,
   1281							DEVFREQ_GOV_STOP, NULL);
   1282				if (ret) {
   1283					dev_warn(dev,
   1284						 "%s: Governor %s stop = %d\n",
   1285						 __func__,
   1286						 devfreq->governor->name, ret);
   1287				}
   1288				/* Fall through */
   1289			}
   1290			devfreq->governor = governor;
   1291			ret = devfreq->governor->event_handler(devfreq,
   1292						DEVFREQ_GOV_START, NULL);
   1293			if (ret) {
   1294				dev_warn(dev, "%s: Governor %s start=%d\n",
   1295					 __func__, devfreq->governor->name,
   1296					 ret);
   1297			}
   1298		}
   1299	}
   1300
   1301err_out:
   1302	mutex_unlock(&devfreq_list_lock);
   1303
   1304	return err;
   1305}
   1306EXPORT_SYMBOL(devfreq_add_governor);
   1307
   1308static void devm_devfreq_remove_governor(void *governor)
   1309{
   1310	WARN_ON(devfreq_remove_governor(governor));
   1311}
   1312
   1313/**
   1314 * devm_devfreq_add_governor() - Add devfreq governor
   1315 * @dev:	device which adds devfreq governor
   1316 * @governor:	the devfreq governor to be added
   1317 *
   1318 * This is a resource-managed variant of devfreq_add_governor().
   1319 */
   1320int devm_devfreq_add_governor(struct device *dev,
   1321			      struct devfreq_governor *governor)
   1322{
   1323	int err;
   1324
   1325	err = devfreq_add_governor(governor);
   1326	if (err)
   1327		return err;
   1328
   1329	return devm_add_action_or_reset(dev, devm_devfreq_remove_governor,
   1330					governor);
   1331}
   1332EXPORT_SYMBOL(devm_devfreq_add_governor);
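
/*
 * Illustrative sketch (hypothetical "sample" governor module, not part of
 * this file): a governor registers itself with devfreq_add_governor() at
 * module init and removes itself on exit; devm_devfreq_add_governor() ties
 * the same lifetime to a device instead. Assumes <linux/module.h> and the
 * local "governor.h" are included.
 */
static int sample_gov_get_target_freq(struct devfreq *devfreq,
				      unsigned long *freq)
{
	*freq = DEVFREQ_MAX_FREQ;	/* trivial policy: always go as fast as allowed */

	return 0;
}

static int sample_gov_event_handler(struct devfreq *devfreq,
				    unsigned int event, void *data)
{
	int ret = 0;

	/* Apply the policy once when the governor is started. */
	if (event == DEVFREQ_GOV_START) {
		mutex_lock(&devfreq->lock);
		ret = update_devfreq(devfreq);
		mutex_unlock(&devfreq->lock);
	}

	return ret;
}

static struct devfreq_governor sample_governor = {
	.name		 = "sample",
	.get_target_freq = sample_gov_get_target_freq,
	.event_handler	 = sample_gov_event_handler,
};

static int __init sample_governor_init(void)
{
	return devfreq_add_governor(&sample_governor);
}
module_init(sample_governor_init);

static void __exit sample_governor_exit(void)
{
	WARN_ON(devfreq_remove_governor(&sample_governor));
}
module_exit(sample_governor_exit);

MODULE_LICENSE("GPL");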
   1333
   1334/**
   1335 * devfreq_remove_governor() - Remove a devfreq governor.
   1336 * @governor:	the devfreq governor to be removed
   1337 */
   1338int devfreq_remove_governor(struct devfreq_governor *governor)
   1339{
   1340	struct devfreq_governor *g;
   1341	struct devfreq *devfreq;
   1342	int err = 0;
   1343
   1344	if (!governor) {
   1345		pr_err("%s: Invalid parameters.\n", __func__);
   1346		return -EINVAL;
   1347	}
   1348
   1349	mutex_lock(&devfreq_list_lock);
   1350	g = find_devfreq_governor(governor->name);
   1351	if (IS_ERR(g)) {
   1352		pr_err("%s: governor %s not registered\n", __func__,
   1353		       governor->name);
   1354		err = PTR_ERR(g);
   1355		goto err_out;
   1356	}
   1357	list_for_each_entry(devfreq, &devfreq_list, node) {
   1358		int ret;
   1359		struct device *dev = devfreq->dev.parent;
   1360
   1361		if (!strncmp(devfreq->governor->name, governor->name,
   1362			     DEVFREQ_NAME_LEN)) {
   1363			/* we should have a devfreq governor! */
   1364			if (!devfreq->governor) {
   1365				dev_warn(dev, "%s: Governor %s NOT present\n",
   1366					 __func__, governor->name);
   1367				continue;
   1368				/* Fall through */
   1369			}
   1370			ret = devfreq->governor->event_handler(devfreq,
   1371						DEVFREQ_GOV_STOP, NULL);
   1372			if (ret) {
   1373				dev_warn(dev, "%s: Governor %s stop=%d\n",
   1374					 __func__, devfreq->governor->name,
   1375					 ret);
   1376			}
   1377			devfreq->governor = NULL;
   1378		}
   1379	}
   1380
   1381	list_del(&governor->node);
   1382err_out:
   1383	mutex_unlock(&devfreq_list_lock);
   1384
   1385	return err;
   1386}
   1387EXPORT_SYMBOL(devfreq_remove_governor);
   1388
   1389static ssize_t name_show(struct device *dev,
   1390			struct device_attribute *attr, char *buf)
   1391{
   1392	struct devfreq *df = to_devfreq(dev);
   1393	return sprintf(buf, "%s\n", dev_name(df->dev.parent));
   1394}
   1395static DEVICE_ATTR_RO(name);
   1396
   1397static ssize_t governor_show(struct device *dev,
   1398			     struct device_attribute *attr, char *buf)
   1399{
   1400	struct devfreq *df = to_devfreq(dev);
   1401
   1402	if (!df->governor)
   1403		return -EINVAL;
   1404
   1405	return sprintf(buf, "%s\n", df->governor->name);
   1406}
   1407
   1408static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
   1409			      const char *buf, size_t count)
   1410{
   1411	struct devfreq *df = to_devfreq(dev);
   1412	int ret;
   1413	char str_governor[DEVFREQ_NAME_LEN + 1];
   1414	const struct devfreq_governor *governor, *prev_governor;
   1415
   1416	if (!df->governor)
   1417		return -EINVAL;
   1418
   1419	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
   1420	if (ret != 1)
   1421		return -EINVAL;
   1422
   1423	mutex_lock(&devfreq_list_lock);
   1424	governor = try_then_request_governor(str_governor);
   1425	if (IS_ERR(governor)) {
   1426		ret = PTR_ERR(governor);
   1427		goto out;
   1428	}
   1429	if (df->governor == governor) {
   1430		ret = 0;
   1431		goto out;
   1432	} else if (IS_SUPPORTED_FLAG(df->governor->flags, IMMUTABLE)
   1433		|| IS_SUPPORTED_FLAG(governor->flags, IMMUTABLE)) {
   1434		ret = -EINVAL;
   1435		goto out;
   1436	}
   1437
   1438	/*
   1439	 * Stop the current governor and remove the specific sysfs files
   1440	 * which depend on current governor.
   1441	 */
   1442	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
   1443	if (ret) {
   1444		dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
   1445			 __func__, df->governor->name, ret);
   1446		goto out;
   1447	}
   1448	remove_sysfs_files(df, df->governor);
   1449
   1450	/*
   1451	 * Start the new governor and create the specific sysfs files
   1452	 * which depend on the new governor.
   1453	 */
   1454	prev_governor = df->governor;
   1455	df->governor = governor;
   1456	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
   1457	if (ret) {
   1458		dev_warn(dev, "%s: Governor %s not started(%d)\n",
   1459			 __func__, df->governor->name, ret);
   1460
   1461		/* Restore previous governor */
   1462		df->governor = prev_governor;
   1463		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
   1464		if (ret) {
   1465			dev_err(dev,
   1466				"%s: reverting to Governor %s failed (%d)\n",
   1467				__func__, prev_governor->name, ret);
   1468			df->governor = NULL;
   1469			goto out;
   1470		}
   1471	}
   1472
   1473	/*
   1474	 * Create the sysfs files for the new governor. If starting the new
   1475	 * governor failed, this recreates the files of the restored previous governor.
   1476	 */
   1477	create_sysfs_files(df, df->governor);
   1478
   1479out:
   1480	mutex_unlock(&devfreq_list_lock);
   1481
   1482	if (!ret)
   1483		ret = count;
   1484	return ret;
   1485}
   1486static DEVICE_ATTR_RW(governor);
   1487
   1488static ssize_t available_governors_show(struct device *d,
   1489					struct device_attribute *attr,
   1490					char *buf)
   1491{
   1492	struct devfreq *df = to_devfreq(d);
   1493	ssize_t count = 0;
   1494
   1495	if (!df->governor)
   1496		return -EINVAL;
   1497
   1498	mutex_lock(&devfreq_list_lock);
   1499
   1500	/*
   1501	 * A devfreq device with an immutable governor (e.g., passive) shows
   1502	 * only its own governor.
   1503	 */
   1504	if (IS_SUPPORTED_FLAG(df->governor->flags, IMMUTABLE)) {
   1505		count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
   1506				  "%s ", df->governor->name);
   1507	/*
   1508	 * Otherwise, the devfreq device lists the registered governors except
   1509	 * for immutable ones such as the passive governor.
   1510	 */
   1511	} else {
   1512		struct devfreq_governor *governor;
   1513
   1514		list_for_each_entry(governor, &devfreq_governor_list, node) {
   1515			if (IS_SUPPORTED_FLAG(governor->flags, IMMUTABLE))
   1516				continue;
   1517			count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
   1518					   "%s ", governor->name);
   1519		}
   1520	}
   1521
   1522	mutex_unlock(&devfreq_list_lock);
   1523
   1524	/* Truncate the trailing space */
   1525	if (count)
   1526		count--;
   1527
   1528	count += sprintf(&buf[count], "\n");
   1529
   1530	return count;
   1531}
   1532static DEVICE_ATTR_RO(available_governors);
   1533
   1534static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
   1535			     char *buf)
   1536{
   1537	unsigned long freq;
   1538	struct devfreq *df = to_devfreq(dev);
   1539
   1540	if (!df->profile)
   1541		return -EINVAL;
   1542
   1543	if (df->profile->get_cur_freq &&
   1544		!df->profile->get_cur_freq(df->dev.parent, &freq))
   1545		return sprintf(buf, "%lu\n", freq);
   1546
   1547	return sprintf(buf, "%lu\n", df->previous_freq);
   1548}
   1549static DEVICE_ATTR_RO(cur_freq);
   1550
   1551static ssize_t target_freq_show(struct device *dev,
   1552				struct device_attribute *attr, char *buf)
   1553{
   1554	struct devfreq *df = to_devfreq(dev);
   1555
   1556	return sprintf(buf, "%lu\n", df->previous_freq);
   1557}
   1558static DEVICE_ATTR_RO(target_freq);
   1559
   1560static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
   1561			      const char *buf, size_t count)
   1562{
   1563	struct devfreq *df = to_devfreq(dev);
   1564	unsigned long value;
   1565	int ret;
   1566
   1567	/*
   1568	 * Protect against theoretical sysfs writes between
   1569	 * device_add and dev_pm_qos_add_request
   1570	 */
   1571	if (!dev_pm_qos_request_active(&df->user_min_freq_req))
   1572		return -EAGAIN;
   1573
   1574	ret = sscanf(buf, "%lu", &value);
   1575	if (ret != 1)
   1576		return -EINVAL;
   1577
   1578	/* Round down to kHz for PM QoS */
   1579	ret = dev_pm_qos_update_request(&df->user_min_freq_req,
   1580					value / HZ_PER_KHZ);
   1581	if (ret < 0)
   1582		return ret;
   1583
   1584	return count;
   1585}
   1586
   1587static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
   1588			     char *buf)
   1589{
   1590	struct devfreq *df = to_devfreq(dev);
   1591	unsigned long min_freq, max_freq;
   1592
   1593	mutex_lock(&df->lock);
   1594	devfreq_get_freq_range(df, &min_freq, &max_freq);
   1595	mutex_unlock(&df->lock);
   1596
   1597	return sprintf(buf, "%lu\n", min_freq);
   1598}
   1599static DEVICE_ATTR_RW(min_freq);
   1600
   1601static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
   1602			      const char *buf, size_t count)
   1603{
   1604	struct devfreq *df = to_devfreq(dev);
   1605	unsigned long value;
   1606	int ret;
   1607
   1608	/*
   1609	 * Protect against theoretical sysfs writes between
   1610	 * device_add and dev_pm_qos_add_request
   1611	 */
   1612	if (!dev_pm_qos_request_active(&df->user_max_freq_req))
   1613		return -EINVAL;
   1614
   1615	ret = sscanf(buf, "%lu", &value);
   1616	if (ret != 1)
   1617		return -EINVAL;
   1618
   1619	/*
   1620	 * PM QoS frequencies are in kHz so we need to convert. Convert by
   1621	 * rounding upwards so that the acceptable interval never shrinks.
   1622	 *
   1623	 * For example if the user writes "666666666" to sysfs this value will
   1624	 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
   1625	 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
   1626	 *
   1627	 * A value of zero means "no limit".
   1628	 */
   1629	if (value)
   1630		value = DIV_ROUND_UP(value, HZ_PER_KHZ);
   1631	else
   1632		value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
   1633
   1634	ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
   1635	if (ret < 0)
   1636		return ret;
   1637
   1638	return count;
   1639}
   1640
   1641static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
   1642			     char *buf)
   1643{
   1644	struct devfreq *df = to_devfreq(dev);
   1645	unsigned long min_freq, max_freq;
   1646
   1647	mutex_lock(&df->lock);
   1648	devfreq_get_freq_range(df, &min_freq, &max_freq);
   1649	mutex_unlock(&df->lock);
   1650
   1651	return sprintf(buf, "%lu\n", max_freq);
   1652}
   1653static DEVICE_ATTR_RW(max_freq);
   1654
   1655static ssize_t available_frequencies_show(struct device *d,
   1656					  struct device_attribute *attr,
   1657					  char *buf)
   1658{
   1659	struct devfreq *df = to_devfreq(d);
   1660	ssize_t count = 0;
   1661	int i;
   1662
   1663	if (!df->profile)
   1664		return -EINVAL;
   1665
   1666	mutex_lock(&df->lock);
   1667
   1668	for (i = 0; i < df->max_state; i++)
   1669		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
   1670				"%lu ", df->freq_table[i]);
   1671
   1672	mutex_unlock(&df->lock);
   1673	/* Truncate the trailing space */
   1674	if (count)
   1675		count--;
   1676
   1677	count += sprintf(&buf[count], "\n");
   1678
   1679	return count;
   1680}
   1681static DEVICE_ATTR_RO(available_frequencies);
   1682
   1683static ssize_t trans_stat_show(struct device *dev,
   1684			       struct device_attribute *attr, char *buf)
   1685{
   1686	struct devfreq *df = to_devfreq(dev);
   1687	ssize_t len;
   1688	int i, j;
   1689	unsigned int max_state;
   1690
   1691	if (!df->profile)
   1692		return -EINVAL;
   1693	max_state = df->max_state;
   1694
   1695	if (max_state == 0)
   1696		return sprintf(buf, "Not Supported.\n");
   1697
   1698	mutex_lock(&df->lock);
   1699	if (!df->stop_polling &&
   1700			devfreq_update_status(df, df->previous_freq)) {
   1701		mutex_unlock(&df->lock);
   1702		return 0;
   1703	}
   1704	mutex_unlock(&df->lock);
   1705
   1706	len = sprintf(buf, "     From  :   To\n");
   1707	len += sprintf(buf + len, "           :");
   1708	for (i = 0; i < max_state; i++)
   1709		len += sprintf(buf + len, "%10lu",
   1710				df->freq_table[i]);
   1711
   1712	len += sprintf(buf + len, "   time(ms)\n");
   1713
   1714	for (i = 0; i < max_state; i++) {
   1715		if (df->freq_table[i] == df->previous_freq)
   1716			len += sprintf(buf + len, "*");
   1717		else
   1718			len += sprintf(buf + len, " ");
   1719
   1720		len += sprintf(buf + len, "%10lu:", df->freq_table[i]);
   1721		for (j = 0; j < max_state; j++)
   1722			len += sprintf(buf + len, "%10u",
   1723				df->stats.trans_table[(i * max_state) + j]);
   1724
   1725		len += sprintf(buf + len, "%10llu\n", (u64)
   1726			jiffies64_to_msecs(df->stats.time_in_state[i]));
   1727	}
   1728
   1729	len += sprintf(buf + len, "Total transition : %u\n",
   1730					df->stats.total_trans);
   1731	return len;
   1732}
   1733
   1734static ssize_t trans_stat_store(struct device *dev,
   1735				struct device_attribute *attr,
   1736				const char *buf, size_t count)
   1737{
   1738	struct devfreq *df = to_devfreq(dev);
   1739	int err, value;
   1740
   1741	if (!df->profile)
   1742		return -EINVAL;
   1743
   1744	if (df->max_state == 0)
   1745		return count;
   1746
   1747	err = kstrtoint(buf, 10, &value);
   1748	if (err || value != 0)
   1749		return -EINVAL;
   1750
   1751	mutex_lock(&df->lock);
   1752	memset(df->stats.time_in_state, 0, (df->max_state *
   1753					sizeof(*df->stats.time_in_state)));
   1754	memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
   1755					df->max_state,
   1756					df->max_state));
   1757	df->stats.total_trans = 0;
   1758	df->stats.last_update = get_jiffies_64();
   1759	mutex_unlock(&df->lock);
   1760
   1761	return count;
   1762}
   1763static DEVICE_ATTR_RW(trans_stat);
   1764
   1765static struct attribute *devfreq_attrs[] = {
   1766	&dev_attr_name.attr,
   1767	&dev_attr_governor.attr,
   1768	&dev_attr_available_governors.attr,
   1769	&dev_attr_cur_freq.attr,
   1770	&dev_attr_available_frequencies.attr,
   1771	&dev_attr_target_freq.attr,
   1772	&dev_attr_min_freq.attr,
   1773	&dev_attr_max_freq.attr,
   1774	&dev_attr_trans_stat.attr,
   1775	NULL,
   1776};
   1777ATTRIBUTE_GROUPS(devfreq);
   1778
   1779static ssize_t polling_interval_show(struct device *dev,
   1780				     struct device_attribute *attr, char *buf)
   1781{
   1782	struct devfreq *df = to_devfreq(dev);
   1783
   1784	if (!df->profile)
   1785		return -EINVAL;
   1786
   1787	return sprintf(buf, "%d\n", df->profile->polling_ms);
   1788}
   1789
   1790static ssize_t polling_interval_store(struct device *dev,
   1791				      struct device_attribute *attr,
   1792				      const char *buf, size_t count)
   1793{
   1794	struct devfreq *df = to_devfreq(dev);
   1795	unsigned int value;
   1796	int ret;
   1797
   1798	if (!df->governor)
   1799		return -EINVAL;
   1800
   1801	ret = sscanf(buf, "%u", &value);
   1802	if (ret != 1)
   1803		return -EINVAL;
   1804
   1805	df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
   1806	ret = count;
   1807
   1808	return ret;
   1809}
   1810static DEVICE_ATTR_RW(polling_interval);
   1811
   1812static ssize_t timer_show(struct device *dev,
   1813			     struct device_attribute *attr, char *buf)
   1814{
   1815	struct devfreq *df = to_devfreq(dev);
   1816
   1817	if (!df->profile)
   1818		return -EINVAL;
   1819
   1820	return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
   1821}
   1822
   1823static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
   1824			      const char *buf, size_t count)
   1825{
   1826	struct devfreq *df = to_devfreq(dev);
   1827	char str_timer[DEVFREQ_NAME_LEN + 1];
   1828	int timer = -1;
   1829	int ret = 0, i;
   1830
   1831	if (!df->governor || !df->profile)
   1832		return -EINVAL;
   1833
   1834	ret = sscanf(buf, "%16s", str_timer);
   1835	if (ret != 1)
   1836		return -EINVAL;
   1837
   1838	for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
   1839		if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
   1840			timer = i;
   1841			break;
   1842		}
   1843	}
   1844
   1845	if (timer < 0) {
   1846		ret = -EINVAL;
   1847		goto out;
   1848	}
   1849
   1850	if (df->profile->timer == timer) {
   1851		ret = 0;
   1852		goto out;
   1853	}
   1854
   1855	mutex_lock(&df->lock);
   1856	df->profile->timer = timer;
   1857	mutex_unlock(&df->lock);
   1858
   1859	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
   1860	if (ret) {
   1861		dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
   1862			 __func__, df->governor->name, ret);
   1863		goto out;
   1864	}
   1865
   1866	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
   1867	if (ret)
   1868		dev_warn(dev, "%s: Governor %s not started(%d)\n",
   1869			 __func__, df->governor->name, ret);
   1870out:
   1871	return ret ? ret : count;
   1872}
   1873static DEVICE_ATTR_RW(timer);
   1874
   1875#define CREATE_SYSFS_FILE(df, name)					\
   1876{									\
   1877	int ret;							\
   1878	ret = sysfs_create_file(&df->dev.kobj, &dev_attr_##name.attr);	\
   1879	if (ret < 0) {							\
   1880		dev_warn(&df->dev,					\
    1881			"Unable to create attr(%s)\n", #name);	\
   1882	}								\
   1883}									\
   1884
   1885/* Create the specific sysfs files which depend on each governor. */
   1886static void create_sysfs_files(struct devfreq *devfreq,
   1887				const struct devfreq_governor *gov)
   1888{
   1889	if (IS_SUPPORTED_ATTR(gov->attrs, POLLING_INTERVAL))
   1890		CREATE_SYSFS_FILE(devfreq, polling_interval);
   1891	if (IS_SUPPORTED_ATTR(gov->attrs, TIMER))
   1892		CREATE_SYSFS_FILE(devfreq, timer);
   1893}
   1894
   1895/* Remove the specific sysfs files which depend on each governor. */
   1896static void remove_sysfs_files(struct devfreq *devfreq,
   1897				const struct devfreq_governor *gov)
   1898{
   1899	if (IS_SUPPORTED_ATTR(gov->attrs, POLLING_INTERVAL))
   1900		sysfs_remove_file(&devfreq->dev.kobj,
   1901				&dev_attr_polling_interval.attr);
   1902	if (IS_SUPPORTED_ATTR(gov->attrs, TIMER))
   1903		sysfs_remove_file(&devfreq->dev.kobj, &dev_attr_timer.attr);
   1904}
   1905
   1906/**
   1907 * devfreq_summary_show() - Show the summary of the devfreq devices
   1908 * @s:		seq_file instance to show the summary of devfreq devices
   1909 * @data:	not used
   1910 *
   1911 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
    1912 * It helps users to see the detailed information of the devfreq devices.
    1913 *
    1914 * Always returns 0 because it only shows information without changing any data.
   1915 */
   1916static int devfreq_summary_show(struct seq_file *s, void *data)
   1917{
   1918	struct devfreq *devfreq;
   1919	struct devfreq *p_devfreq = NULL;
   1920	unsigned long cur_freq, min_freq, max_freq;
   1921	unsigned int polling_ms;
   1922	unsigned int timer;
   1923
   1924	seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n",
   1925			"dev",
   1926			"parent_dev",
   1927			"governor",
   1928			"timer",
   1929			"polling_ms",
   1930			"cur_freq_Hz",
   1931			"min_freq_Hz",
   1932			"max_freq_Hz");
   1933	seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n",
   1934			"------------------------------",
   1935			"------------------------------",
   1936			"---------------",
   1937			"----------",
   1938			"----------",
   1939			"------------",
   1940			"------------",
   1941			"------------");
   1942
   1943	mutex_lock(&devfreq_list_lock);
   1944
   1945	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
   1946#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
   1947		if (!strncmp(devfreq->governor->name, DEVFREQ_GOV_PASSIVE,
   1948							DEVFREQ_NAME_LEN)) {
   1949			struct devfreq_passive_data *data = devfreq->data;
   1950
   1951			if (data)
   1952				p_devfreq = data->parent;
   1953		} else {
   1954			p_devfreq = NULL;
   1955		}
   1956#endif
   1957
   1958		mutex_lock(&devfreq->lock);
   1959		cur_freq = devfreq->previous_freq;
   1960		devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
   1961		timer = devfreq->profile->timer;
   1962
   1963		if (IS_SUPPORTED_ATTR(devfreq->governor->attrs, POLLING_INTERVAL))
   1964			polling_ms = devfreq->profile->polling_ms;
   1965		else
   1966			polling_ms = 0;
   1967		mutex_unlock(&devfreq->lock);
   1968
   1969		seq_printf(s,
   1970			"%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n",
   1971			dev_name(&devfreq->dev),
   1972			p_devfreq ? dev_name(&p_devfreq->dev) : "null",
   1973			devfreq->governor->name,
   1974			polling_ms ? timer_name[timer] : "null",
   1975			polling_ms,
   1976			cur_freq,
   1977			min_freq,
   1978			max_freq);
   1979	}
   1980
   1981	mutex_unlock(&devfreq_list_lock);
   1982
   1983	return 0;
   1984}
   1985DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
   1986
   1987static int __init devfreq_init(void)
   1988{
   1989	devfreq_class = class_create(THIS_MODULE, "devfreq");
   1990	if (IS_ERR(devfreq_class)) {
   1991		pr_err("%s: couldn't create class\n", __FILE__);
   1992		return PTR_ERR(devfreq_class);
   1993	}
   1994
   1995	devfreq_wq = create_freezable_workqueue("devfreq_wq");
   1996	if (!devfreq_wq) {
   1997		class_destroy(devfreq_class);
   1998		pr_err("%s: couldn't create workqueue\n", __FILE__);
   1999		return -ENOMEM;
   2000	}
   2001	devfreq_class->dev_groups = devfreq_groups;
   2002
   2003	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
   2004	debugfs_create_file("devfreq_summary", 0444,
   2005				devfreq_debugfs, NULL,
   2006				&devfreq_summary_fops);
   2007
   2008	return 0;
   2009}
   2010subsys_initcall(devfreq_init);
   2011
   2012/*
   2013 * The following are helper functions for devfreq user device drivers with
   2014 * OPP framework.
   2015 */
   2016
   2017/**
   2018 * devfreq_recommended_opp() - Helper function to get proper OPP for the
   2019 *			     freq value given to target callback.
   2020 * @dev:	The devfreq user device. (parent of devfreq)
   2021 * @freq:	The frequency given to target function
   2022 * @flags:	Flags handed from devfreq framework.
   2023 *
   2024 * The callers are required to call dev_pm_opp_put() for the returned OPP after
   2025 * use.
   2026 */
   2027struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
   2028					   unsigned long *freq,
   2029					   u32 flags)
   2030{
   2031	struct dev_pm_opp *opp;
   2032
   2033	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
   2034		/* The freq is an upper bound. opp should be lower */
   2035		opp = dev_pm_opp_find_freq_floor(dev, freq);
   2036
   2037		/* If not available, use the closest opp */
   2038		if (opp == ERR_PTR(-ERANGE))
   2039			opp = dev_pm_opp_find_freq_ceil(dev, freq);
   2040	} else {
    2041		/* The freq is a lower bound. opp should be higher */
   2042		opp = dev_pm_opp_find_freq_ceil(dev, freq);
   2043
   2044		/* If not available, use the closest opp */
   2045		if (opp == ERR_PTR(-ERANGE))
   2046			opp = dev_pm_opp_find_freq_floor(dev, freq);
   2047	}
   2048
   2049	return opp;
   2050}
   2051EXPORT_SYMBOL(devfreq_recommended_opp);
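/*
 * Illustrative driver-side usage (a sketch, not part of the upstream driver
 * set): a devfreq driver's target() callback normally passes the requested
 * frequency through devfreq_recommended_opp() so that the value is clamped
 * to a real OPP before the clock is programmed.  foo_set_rate() and the
 * foo_ prefix are placeholders for a hypothetical driver.
 *
 *	static int foo_devfreq_target(struct device *dev, unsigned long *freq,
 *				      u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		return foo_set_rate(dev, *freq);
 *	}
 */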
   2052
   2053/**
   2054 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
    2055 *				     of any changes in the OPP
    2056 *				     availability
   2057 * @dev:	The devfreq user device. (parent of devfreq)
   2058 * @devfreq:	The devfreq object.
   2059 */
   2060int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
   2061{
   2062	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
   2063}
   2064EXPORT_SYMBOL(devfreq_register_opp_notifier);
   2065
   2066/**
   2067 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
    2068 *				       notified of any changes in the OPP
    2069 *				       availability.
   2070 * @dev:	The devfreq user device. (parent of devfreq)
   2071 * @devfreq:	The devfreq object.
   2072 *
    2073 * This must be called from the exit() callback of devfreq_dev_profile if
    2074 * devfreq_recommended_opp is used.
   2075 */
   2076int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
   2077{
   2078	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
   2079}
   2080EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
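/*
 * A minimal sketch of how the register/unregister pair is typically used by
 * a driver that relies on devfreq_recommended_opp(); bar_drvdata and the
 * bar_ prefix are placeholders.  The notifier is registered once the devfreq
 * device exists (usually in probe()) and dropped from the profile's exit()
 * callback:
 *
 *	static void bar_devfreq_exit(struct device *dev)
 *	{
 *		struct bar_drvdata *bar = dev_get_drvdata(dev);
 *
 *		devfreq_unregister_opp_notifier(dev, bar->devfreq);
 *	}
 *
 *	In probe(), after devfreq_add_device():
 *		devfreq_register_opp_notifier(dev, bar->devfreq);
 */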
   2081
   2082static void devm_devfreq_opp_release(struct device *dev, void *res)
   2083{
   2084	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
   2085}
   2086
   2087/**
   2088 * devm_devfreq_register_opp_notifier() - Resource-managed
   2089 *					  devfreq_register_opp_notifier()
   2090 * @dev:	The devfreq user device. (parent of devfreq)
   2091 * @devfreq:	The devfreq object.
   2092 */
   2093int devm_devfreq_register_opp_notifier(struct device *dev,
   2094				       struct devfreq *devfreq)
   2095{
   2096	struct devfreq **ptr;
   2097	int ret;
   2098
   2099	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
   2100	if (!ptr)
   2101		return -ENOMEM;
   2102
   2103	ret = devfreq_register_opp_notifier(dev, devfreq);
   2104	if (ret) {
   2105		devres_free(ptr);
   2106		return ret;
   2107	}
   2108
   2109	*ptr = devfreq;
   2110	devres_add(dev, ptr);
   2111
   2112	return 0;
   2113}
   2114EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
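/*
 * With the resource-managed variant the explicit unregister in the exit()
 * callback becomes unnecessary; a hypothetical probe() path could look like
 * this (bar_profile is a placeholder struct devfreq_dev_profile):
 *
 *	devfreq = devm_devfreq_add_device(dev, &bar_profile,
 *					  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 *	if (IS_ERR(devfreq))
 *		return PTR_ERR(devfreq);
 *
 *	ret = devm_devfreq_register_opp_notifier(dev, devfreq);
 *	if (ret)
 *		return ret;
 */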
   2115
   2116/**
   2117 * devm_devfreq_unregister_opp_notifier() - Resource-managed
   2118 *					    devfreq_unregister_opp_notifier()
   2119 * @dev:	The devfreq user device. (parent of devfreq)
   2120 * @devfreq:	The devfreq object.
   2121 */
   2122void devm_devfreq_unregister_opp_notifier(struct device *dev,
   2123					 struct devfreq *devfreq)
   2124{
   2125	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
   2126			       devm_devfreq_dev_match, devfreq));
   2127}
   2128EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
   2129
   2130/**
   2131 * devfreq_register_notifier() - Register a driver with devfreq
   2132 * @devfreq:	The devfreq object.
   2133 * @nb:		The notifier block to register.
   2134 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
   2135 */
   2136int devfreq_register_notifier(struct devfreq *devfreq,
   2137			      struct notifier_block *nb,
   2138			      unsigned int list)
   2139{
   2140	int ret = 0;
   2141
   2142	if (!devfreq)
   2143		return -EINVAL;
   2144
   2145	switch (list) {
   2146	case DEVFREQ_TRANSITION_NOTIFIER:
   2147		ret = srcu_notifier_chain_register(
   2148				&devfreq->transition_notifier_list, nb);
   2149		break;
   2150	default:
   2151		ret = -EINVAL;
   2152	}
   2153
   2154	return ret;
   2155}
   2156EXPORT_SYMBOL(devfreq_register_notifier);
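/*
 * Sketch of a DEVFREQ_TRANSITION_NOTIFIER user (baz_ names are placeholders).
 * The chain is notified with DEVFREQ_PRECHANGE and DEVFREQ_POSTCHANGE events
 * and a struct devfreq_freqs describing the old and new frequencies:
 *
 *	static int baz_devfreq_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("devfreq: %lu Hz -> %lu Hz\n",
 *				 freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block baz_nb = {
 *		.notifier_call = baz_devfreq_event,
 *	};
 *
 *	ret = devfreq_register_notifier(devfreq, &baz_nb,
 *					DEVFREQ_TRANSITION_NOTIFIER);
 */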
   2157
    2158/**
   2159 * devfreq_unregister_notifier() - Unregister a driver with devfreq
   2160 * @devfreq:	The devfreq object.
   2161 * @nb:		The notifier block to be unregistered.
   2162 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
   2163 */
   2164int devfreq_unregister_notifier(struct devfreq *devfreq,
   2165				struct notifier_block *nb,
   2166				unsigned int list)
   2167{
   2168	int ret = 0;
   2169
   2170	if (!devfreq)
   2171		return -EINVAL;
   2172
   2173	switch (list) {
   2174	case DEVFREQ_TRANSITION_NOTIFIER:
   2175		ret = srcu_notifier_chain_unregister(
   2176				&devfreq->transition_notifier_list, nb);
   2177		break;
   2178	default:
   2179		ret = -EINVAL;
   2180	}
   2181
   2182	return ret;
   2183}
   2184EXPORT_SYMBOL(devfreq_unregister_notifier);
   2185
   2186struct devfreq_notifier_devres {
   2187	struct devfreq *devfreq;
   2188	struct notifier_block *nb;
   2189	unsigned int list;
   2190};
   2191
   2192static void devm_devfreq_notifier_release(struct device *dev, void *res)
   2193{
   2194	struct devfreq_notifier_devres *this = res;
   2195
   2196	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
   2197}
   2198
   2199/**
   2200 * devm_devfreq_register_notifier()
   2201 *	- Resource-managed devfreq_register_notifier()
   2202 * @dev:	The devfreq user device. (parent of devfreq)
   2203 * @devfreq:	The devfreq object.
    2204 * @nb:		The notifier block to be registered.
   2205 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
   2206 */
   2207int devm_devfreq_register_notifier(struct device *dev,
   2208				struct devfreq *devfreq,
   2209				struct notifier_block *nb,
   2210				unsigned int list)
   2211{
   2212	struct devfreq_notifier_devres *ptr;
   2213	int ret;
   2214
   2215	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
   2216				GFP_KERNEL);
   2217	if (!ptr)
   2218		return -ENOMEM;
   2219
   2220	ret = devfreq_register_notifier(devfreq, nb, list);
   2221	if (ret) {
   2222		devres_free(ptr);
   2223		return ret;
   2224	}
   2225
   2226	ptr->devfreq = devfreq;
   2227	ptr->nb = nb;
   2228	ptr->list = list;
   2229	devres_add(dev, ptr);
   2230
   2231	return 0;
   2232}
   2233EXPORT_SYMBOL(devm_devfreq_register_notifier);
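/*
 * The devm_ variant removes the need for a matching unregister call in the
 * driver's remove path; reusing the baz_nb block from the sketch above:
 *
 *	ret = devm_devfreq_register_notifier(dev, devfreq, &baz_nb,
 *					     DEVFREQ_TRANSITION_NOTIFIER);
 */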
   2234
   2235/**
   2236 * devm_devfreq_unregister_notifier()
   2237 *	- Resource-managed devfreq_unregister_notifier()
   2238 * @dev:	The devfreq user device. (parent of devfreq)
   2239 * @devfreq:	The devfreq object.
   2240 * @nb:		The notifier block to be unregistered.
   2241 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
   2242 */
   2243void devm_devfreq_unregister_notifier(struct device *dev,
   2244				      struct devfreq *devfreq,
   2245				      struct notifier_block *nb,
   2246				      unsigned int list)
   2247{
   2248	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
   2249			       devm_devfreq_dev_match, devfreq));
   2250}
   2251EXPORT_SYMBOL(devm_devfreq_unregister_notifier);