cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpufreq-dt.c (9054B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "cpufreq-dt.h"

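/*
 * Per-policy driver data: the CPUs sharing one clock/OPP table, the device of
 * the CPU the policy was created for, the OPP table handle returned when a
 * supply regulator is registered, the cpufreq frequency table built from the
 * OPPs, and whether static OPPs from DT were added (and must be removed on
 * release).
 */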
struct private_data {
	struct list_head node;

	cpumask_var_t cpus;
	struct device *cpu_dev;
	struct opp_table *opp_table;
	struct cpufreq_frequency_table *freq_table;
	bool have_static_opps;
};

static LIST_HEAD(priv_list);

static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,   /* Extra space for boost-attr if required */
	NULL,
};

static struct private_data *cpufreq_dt_find_data(int cpu)
{
	struct private_data *priv;

	list_for_each_entry(priv, &priv_list, node) {
		if (cpumask_test_cpu(cpu, priv->cpus))
			return priv;
	}

	return NULL;
}

static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct private_data *priv = policy->driver_data;
	unsigned long freq = policy->freq_table[index].frequency;

	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
}

/*
 * An earlier version of opp-v1 bindings used to name the regulator
 * "cpu0-supply", we still need to handle that for backwards compatibility.
 */
static const char *find_supply_name(struct device *dev)
{
	struct device_node *np;
	struct property *pp;
	int cpu = dev->id;
	const char *name = NULL;

	np = of_node_get(dev->of_node);

	/* This must be valid for sure */
	if (WARN_ON(!np))
		return NULL;

	/* Try "cpu0" for older DTs */
	if (!cpu) {
		pp = of_find_property(np, "cpu0-supply", NULL);
		if (pp) {
			name = "cpu0";
			goto node_put;
		}
	}

	pp = of_find_property(np, "cpu-supply", NULL);
	if (pp) {
		name = "cpu";
		goto node_put;
	}

	dev_dbg(dev, "no regulator for cpu%d\n", cpu);
node_put:
	of_node_put(np);
	return name;
}

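/*
 * Called by the cpufreq core when a policy is created: look up the per-policy
 * data prepared in dt_cpufreq_early_init(), get the CPU clock and fill in the
 * policy (shared CPUs, frequency table, suspend frequency, transition latency
 * and, if the OPP table provides them, boost frequencies).
 */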
static int cpufreq_init(struct cpufreq_policy *policy)
{
	struct private_data *priv;
	struct device *cpu_dev;
	struct clk *cpu_clk;
	unsigned int transition_latency;
	int ret;

	priv = cpufreq_dt_find_data(policy->cpu);
	if (!priv) {
		pr_err("failed to find data for cpu%d\n", policy->cpu);
		return -ENODEV;
	}
	cpu_dev = priv->cpu_dev;

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		ret = PTR_ERR(cpu_clk);
		dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
		return ret;
	}

	transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
	if (!transition_latency)
		transition_latency = CPUFREQ_ETERNAL;

	cpumask_copy(policy->cpus, priv->cpus);
	policy->driver_data = priv;
	policy->clk = cpu_clk;
	policy->freq_table = priv->freq_table;
	policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;
	policy->cpuinfo.transition_latency = transition_latency;
	policy->dvfs_possible_from_any_cpu = true;

	/* Support turbo/boost mode */
	if (policy_has_boost_freq(policy)) {
		/* This gets disabled by core on driver unregister */
		ret = cpufreq_enable_boost_support();
		if (ret)
			goto out_clk_put;
		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
	}

	return 0;

out_clk_put:
	clk_put(cpu_clk);

	return ret;
}

static int cpufreq_online(struct cpufreq_policy *policy)
{
	/* We did light-weight tear down earlier, nothing to do here */
	return 0;
}

static int cpufreq_offline(struct cpufreq_policy *policy)
{
	/*
	 * Preserve policy->driver_data and don't free resources on light-weight
	 * tear down.
	 */
	return 0;
}

static int cpufreq_exit(struct cpufreq_policy *policy)
{
	clk_put(policy->clk);
	return 0;
}

static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.online = cpufreq_online,
	.offline = cpufreq_offline,
	.register_em = cpufreq_register_em_with_opp,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
	.suspend = cpufreq_generic_suspend,
};

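/*
 * Allocate and initialize per-policy data for @cpu, unless the CPU is already
 * covered by an existing policy: register the CPU's supply regulator with the
 * OPP layer (if one is described in DT), discover the CPUs sharing the OPP
 * table, populate the OPPs and build the cpufreq frequency table.
 */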
static int dt_cpufreq_early_init(struct device *dev, int cpu)
{
	struct private_data *priv;
	struct device *cpu_dev;
	bool fallback = false;
	const char *reg_name;
	int ret;

	/* Check if this CPU is already covered by some other policy */
	if (cpufreq_dt_find_data(cpu))
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, priv->cpus);
	priv->cpu_dev = cpu_dev;

	/*
	 * OPP layer will be taking care of regulators now, but it needs to know
	 * the name of the regulator first.
	 */
	reg_name = find_supply_name(cpu_dev);
	if (reg_name) {
		priv->opp_table = dev_pm_opp_set_regulators(cpu_dev, &reg_name,
							    1);
		if (IS_ERR(priv->opp_table)) {
			ret = PTR_ERR(priv->opp_table);
			if (ret != -EPROBE_DEFER)
				dev_err(cpu_dev, "failed to set regulators: %d\n",
					ret);
			goto free_cpumask;
		}
	}

	/* Get OPP-sharing information from "operating-points-v2" bindings */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
	if (ret) {
		if (ret != -ENOENT)
			goto out;

		/*
		 * operating-points-v2 not supported, fallback to all CPUs share
		 * OPP for backward compatibility if the platform hasn't set
		 * sharing CPUs.
		 */
		if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus))
			fallback = true;
	}

	/*
	 * Initialize OPP tables for all priv->cpus. They will be shared by
	 * all CPUs which have marked their CPUs shared with OPP bindings.
	 *
	 * For platforms not using operating-points-v2 bindings, we do this
	 * before updating priv->cpus. Otherwise, we will end up creating
	 * duplicate OPPs for the CPUs.
	 *
	 * OPPs might be populated at runtime, don't fail for error here unless
	 * it is -EPROBE_DEFER.
	 */
	ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);
	if (!ret) {
		priv->have_static_opps = true;
	} else if (ret == -EPROBE_DEFER) {
		goto out;
	}

	/*
	 * The OPP table must be initialized, statically or dynamically, by this
	 * point.
	 */
	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "OPP table can't be empty\n");
		ret = -ENODEV;
		goto out;
	}

	if (fallback) {
		cpumask_setall(priv->cpus);
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
		if (ret)
			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
				__func__, ret);
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out;
	}

	list_add(&priv->node, &priv_list);
	return 0;

out:
	if (priv->have_static_opps)
		dev_pm_opp_of_cpumask_remove_table(priv->cpus);
	dev_pm_opp_put_regulators(priv->opp_table);
free_cpumask:
	free_cpumask_var(priv->cpus);
	return ret;
}

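/* Undo dt_cpufreq_early_init() for every policy on the list. */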
static void dt_cpufreq_release(void)
{
	struct private_data *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &priv_list, node) {
		dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
		if (priv->have_static_opps)
			dev_pm_opp_of_cpumask_remove_table(priv->cpus);
		dev_pm_opp_put_regulators(priv->opp_table);
		free_cpumask_var(priv->cpus);
		list_del(&priv->node);
	}
}

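/*
 * Set up per-policy data for all possible CPUs (bailing out early on
 * -EPROBE_DEFER), apply optional platform-data overrides and register the
 * cpufreq driver.
 */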
static int dt_cpufreq_probe(struct platform_device *pdev)
{
	struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
	int ret, cpu;

	/* Request resources early so we can return in case of -EPROBE_DEFER */
	for_each_possible_cpu(cpu) {
		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
		if (ret)
			goto err;
	}

	if (data) {
		if (data->have_governor_per_policy)
			dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

		dt_cpufreq_driver.resume = data->resume;
		if (data->suspend)
			dt_cpufreq_driver.suspend = data->suspend;
		if (data->get_intermediate) {
			dt_cpufreq_driver.target_intermediate = data->target_intermediate;
			dt_cpufreq_driver.get_intermediate = data->get_intermediate;
		}
	}

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (ret) {
		dev_err(&pdev->dev, "failed register driver: %d\n", ret);
		goto err;
	}

	return 0;
err:
	dt_cpufreq_release();
	return ret;
}

static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	dt_cpufreq_release();
	return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name	= "cpufreq-dt",
	},
	.probe		= dt_cpufreq_probe,
	.remove		= dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");