cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tegra194-cpufreq.c (17077B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <asm/smp_plat.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

#define KHZ                     1000
#define REF_CLK_MHZ             408 /* 408 MHz */
#define US_DELAY                500
#define CPUFREQ_TBL_STEP_HZ     (50 * KHZ * KHZ)
#define MAX_CNT                 ~0U

#define NDIV_MASK              0x1FF

#define CORE_OFFSET(cpu)			(cpu * 8)
#define CMU_CLKS_BASE				0x2000
#define SCRATCH_FREQ_CORE_REG(data, cpu)	(data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))

#define MMCRAB_CLUSTER_BASE(cl)			(0x30000 + (cl * 0x10000))
#define CLUSTER_ACTMON_BASE(data, cl) \
			(data->regs + (MMCRAB_CLUSTER_BASE(cl) + data->soc->actmon_cntr_base))
#define CORE_ACTMON_CNTR_REG(data, cl, cpu)	(CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
/* cpufreq transition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
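/*
 * Worked example of the register-address macros above (core/cluster
 * numbers chosen for illustration only): on Tegra234, where
 * maxcpus_per_cluster = 4, core 2 of cluster 1 has
 * mpidr_id = 1 * 4 + 2 = 6, so SCRATCH_FREQ_CORE_REG() resolves to
 * regs + 0x2000 + 6 * 8 = regs + 0x2030, and CORE_ACTMON_CNTR_REG()
 * resolves to regs + (0x30000 + 1 * 0x10000) + 0x9000 + 2 * 8
 * = regs + 0x49010.
 */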

enum cluster {
	CLUSTER0,
	CLUSTER1,
	CLUSTER2,
	CLUSTER3,
	MAX_CLUSTERS,
};

struct tegra_cpu_ctr {
	u32 cpu;
	u32 coreclk_cnt, last_coreclk_cnt;
	u32 refclk_cnt, last_refclk_cnt;
};

struct read_counters_work {
	struct work_struct work;
	struct tegra_cpu_ctr c;
};

struct tegra_cpufreq_ops {
	void (*read_counters)(struct tegra_cpu_ctr *c);
	void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
	void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
	int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
};

struct tegra_cpufreq_soc {
	struct tegra_cpufreq_ops *ops;
	int maxcpus_per_cluster;
	phys_addr_t actmon_cntr_base;
};

struct tegra194_cpufreq_data {
	void __iomem *regs;
	size_t num_clusters;
	struct cpufreq_frequency_table **tables;
	const struct tegra_cpufreq_soc *soc;
};

static struct workqueue_struct *read_counters_wq;

static void tegra_get_cpu_mpidr(void *mpidr)
{
	*((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
}

static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
{
	u64 mpidr;

	smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);

	if (cpuid)
		*cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	if (clusterid)
		*clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2);
}
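/*
 * Note: Tegra234 encodes the core id in MPIDR affinity level 1 and the
 * cluster id in level 2, whereas the Tegra194 variant further down uses
 * levels 0 and 1. The per-SoC get_cpu_cluster_id ops hide this
 * difference from the common code.
 */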

static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	void __iomem *freq_core_reg;
	u64 mpidr_id;

	/* use physical id to get address of per core frequency register */
	mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
	freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);

	*ndiv = readl(freq_core_reg) & NDIV_MASK;

	return 0;
}

static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	void __iomem *freq_core_reg;
	u32 cpu, cpuid, clusterid;
	u64 mpidr_id;

	for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
		data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);

		/* use physical id to get address of per core frequency register */
		mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
		freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);

		writel(ndiv, freq_core_reg);
	}
}

/*
 * This register provides access to two counter values with a single
 * 64-bit read. The counter values are used to determine the average
 * actual frequency a core has run at over a period of time.
 *     [63:32] PLLP counter: Counts at fixed frequency (408 MHz)
 *     [31:0] Core clock counter: Counts on every core clock cycle
 */
static void tegra234_read_counters(struct tegra_cpu_ctr *c)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	void __iomem *actmon_reg;
	u32 cpuid, clusterid;
	u64 val;

	data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
	actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);

	val = readq(actmon_reg);
	c->last_refclk_cnt = upper_32_bits(val);
	c->last_coreclk_cnt = lower_32_bits(val);
	udelay(US_DELAY);
	val = readq(actmon_reg);
	c->refclk_cnt = upper_32_bits(val);
	c->coreclk_cnt = lower_32_bits(val);
}

static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
	.read_counters = tegra234_read_counters,
	.get_cpu_cluster_id = tegra234_get_cpu_cluster_id,
	.get_cpu_ndiv = tegra234_get_cpu_ndiv,
	.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};

const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
	.ops = &tegra234_cpufreq_ops,
	.actmon_cntr_base = 0x9000,
	.maxcpus_per_cluster = 4,
};

static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
{
	u64 mpidr;

	smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);

	if (cpuid)
		*cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	if (clusterid)
		*clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}

/*
 * Read the per-core, read-only system register NVFREQ_FEEDBACK_EL1.
 * The register provides frequency feedback information to
 * determine the average actual frequency a core has run at over
 * a period of time.
 *	[31:0] PLLP counter: Counts at fixed frequency (408 MHz)
 *	[63:32] Core clock counter: counts on every core clock cycle
 *			where the core is architecturally clocking
 */
static u64 read_freq_feedback(void)
{
	u64 val = 0;

	asm volatile("mrs %0, s3_0_c15_c0_5" : "=r" (val) : );

	return val;
}

static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
				   *nltbl, u16 ndiv)
{
	return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
}
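/*
 * Worked example for map_ndiv_to_freq(), using hypothetical BPMP values
 * (the real pdiv/mdiv/ndiv ranges come from the MRQ_CPU_NDIV_LIMITS
 * response and differ per SoC and cluster): with ref_clk_hz = 408000000,
 * pdiv = 16, mdiv = 1 and ndiv = 80, the result is
 * 408000000 / 1000 * 80 / (16 * 1) = 2040000 kHz, i.e. ~2.04 GHz.
 */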

static void tegra194_read_counters(struct tegra_cpu_ctr *c)
{
	u64 val;

	val = read_freq_feedback();
	c->last_refclk_cnt = lower_32_bits(val);
	c->last_coreclk_cnt = upper_32_bits(val);
	udelay(US_DELAY);
	val = read_freq_feedback();
	c->refclk_cnt = lower_32_bits(val);
	c->coreclk_cnt = upper_32_bits(val);
}

static void tegra_read_counters(struct work_struct *work)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	struct read_counters_work *read_counters_work;
	struct tegra_cpu_ctr *c;

	/*
	 * ref_clk_counter (a 32-bit counter) runs on the constant clock
	 * pll_p (408 MHz).
	 * It takes 2 ^ 32 / 408 MHz = 10526880 usec = 10.527 sec
	 * to overflow the ref clk counter.
	 *
	 * Likewise, core_clk_counter (a 32-bit counter) runs on the core
	 * clock. It is synchronized to crab_clk (cpu_crab_clk), which runs
	 * at the cluster frequency. Assuming a max cluster clock of
	 * ~2000 MHz, it takes 2 ^ 32 / 2000 MHz = ~2.147 sec to overflow
	 * the core clk counter.
	 */
	read_counters_work = container_of(work, struct read_counters_work,
					  work);
	c = &read_counters_work->c;

	data->soc->ops->read_counters(c);
}

/*
 * Return the instantaneous cpu speed.
 * The instantaneous frequency is calculated by taking a sample on
 * every query:
 *	- Read the core and ref clock counters
 *	- Delay for X us
 *	- Read both cycle counters again
 *	- Calculate the frequency as the delta of the core cycle counter
 *	  divided by the delay time, or equivalently by the delta of
 *	  ref_clk_counter over that time
 *	- Return Kcycles/second, i.e. the frequency in KHz
 *
 *	delta time period = x sec
 *			  = delta ref_clk_counter / (408 * 10^6) sec
 *	freq in Hz = cycles/sec
 *		   = delta cycles / x sec
 *		   = (delta cycles * 408 * 10^6) / delta ref_clk_counter
 *	in KHz	   = (delta cycles * 408 * 10^3) / delta ref_clk_counter
 *
 * @cpu - logical cpu whose freq is to be updated
 * Returns freq in KHz on success, 0 if the cpu is offline
 */
static unsigned int tegra194_calculate_speed(u32 cpu)
{
	struct read_counters_work read_counters_work;
	struct tegra_cpu_ctr c;
	u32 delta_refcnt;
	u32 delta_ccnt;
	u32 rate_mhz;

	/*
	 * udelay() is required to reconstruct cpu frequency over an
	 * observation window. Using workqueue to call udelay() with
	 * interrupts enabled.
	 */
	read_counters_work.c.cpu = cpu;
	INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
	queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
	flush_work(&read_counters_work.work);
	c = read_counters_work.c;

	if (c.coreclk_cnt < c.last_coreclk_cnt)
		delta_ccnt = c.coreclk_cnt + (MAX_CNT - c.last_coreclk_cnt);
	else
		delta_ccnt = c.coreclk_cnt - c.last_coreclk_cnt;
	if (!delta_ccnt)
		return 0;

	/* ref clock is 32 bits */
	if (c.refclk_cnt < c.last_refclk_cnt)
		delta_refcnt = c.refclk_cnt + (MAX_CNT - c.last_refclk_cnt);
	else
		delta_refcnt = c.refclk_cnt - c.last_refclk_cnt;
	if (!delta_refcnt) {
		pr_debug("cpufreq: %d is idle, delta_refcnt: 0\n", cpu);
		return 0;
	}
	rate_mhz = ((unsigned long)(delta_ccnt * REF_CLK_MHZ)) / delta_refcnt;

	return (rate_mhz * KHZ); /* in KHz */
}
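/*
 * Sanity check of the math above with round numbers: over the 500 us
 * observation window the 408 MHz ref clock advances by about 204000
 * counts. If the core clock counter advanced by 1000000 in the same
 * window, rate_mhz = 1000000 * 408 / 204000 = 2000, which is returned
 * as 2000000 kHz (2 GHz).
 */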

static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
{
	u64 ndiv_val;

	asm volatile("mrs %0, s3_0_c15_c0_4" : "=r" (ndiv_val) : );

	*(u64 *)ndiv = ndiv_val;
}

static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
	int ret;

	/*
	 * ndiv already points at the caller's u64, so pass it through
	 * directly; taking &ndiv here would make the sysreg helper write
	 * over the local pointer instead of the caller's value.
	 */
	ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, ndiv, true);

	return ret;
}

static void tegra194_set_cpu_ndiv_sysreg(void *data)
{
	u64 ndiv_val = *(u64 *)data;

	asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
}

static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
	on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
}

static unsigned int tegra194_get_speed(u32 cpu)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	struct cpufreq_frequency_table *pos;
	u32 cpuid, clusterid;
	unsigned int rate;
	u64 ndiv;
	int ret;

	data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);

	/* reconstruct actual cpu freq using counters */
	rate = tegra194_calculate_speed(cpu);

	/* get last written ndiv value */
	ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
	if (WARN_ON_ONCE(ret))
		return rate;

	/*
	 * If the reconstructed frequency is within an acceptable delta of
	 * the last written value, return the frequency corresponding to
	 * the last written ndiv value from the freq_table. This is done
	 * to return a consistent value.
	 */
	cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) {
		if (pos->driver_data != ndiv)
			continue;

		if (abs(pos->frequency - rate) > 115200) {
			pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
				cpu, rate, pos->frequency, ndiv);
		} else {
			rate = pos->frequency;
		}
		break;
	}
	return rate;
}
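/*
 * The 115200 kHz tolerance above means the counter-based estimate is
 * only replaced by the freq_table entry for the last-written ndiv when
 * the two agree to within ~115 MHz; a larger deviation is logged and
 * the raw estimate is returned instead.
 */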

static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
	u32 start_cpu, cpu;
	u32 clusterid;

	data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);

	if (clusterid >= data->num_clusters || !data->tables[clusterid])
		return -EINVAL;

	start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
	/* set same policy for all cpus in a cluster */
	for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
		if (cpu_possible(cpu))
			cpumask_set_cpu(cpu, policy->cpus);
	}
	policy->freq_table = data->tables[clusterid];
	policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;

	return 0;
}

static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
				       unsigned int index)
{
	struct cpufreq_frequency_table *tbl = policy->freq_table + index;
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();

	/*
	 * Each core writes its frequency request to a per-core register.
	 * All cores in a cluster then run at the same frequency, which is
	 * the maximum of the values requested by the cores in that
	 * cluster.
	 */
	data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);

	return 0;
}

static struct cpufreq_driver tegra194_cpufreq_driver = {
	.name = "tegra194",
	.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = tegra194_cpufreq_set_target,
	.get = tegra194_get_speed,
	.init = tegra194_cpufreq_init,
	.attr = cpufreq_generic_attr,
};

static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
	.read_counters = tegra194_read_counters,
	.get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
	.get_cpu_ndiv = tegra194_get_cpu_ndiv,
	.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};

const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
	.ops = &tegra194_cpufreq_ops,
	.maxcpus_per_cluster = 2,
};

static void tegra194_cpufreq_free_resources(void)
{
	destroy_workqueue(read_counters_wq);
}

static struct cpufreq_frequency_table *
init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
		unsigned int cluster_id)
{
	struct cpufreq_frequency_table *freq_table;
	struct mrq_cpu_ndiv_limits_response resp;
	unsigned int num_freqs, ndiv, delta_ndiv;
	struct mrq_cpu_ndiv_limits_request req;
	struct tegra_bpmp_message msg;
	u16 freq_table_step_size;
	int err, index;

	memset(&req, 0, sizeof(req));
	req.cluster_id = cluster_id;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_CPU_NDIV_LIMITS;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err)
		return ERR_PTR(err);
	if (msg.rx.ret == -BPMP_EINVAL) {
		/* Cluster not available */
		return NULL;
	}
	if (msg.rx.ret)
		return ERR_PTR(-EINVAL);

	/*
	 * Make sure frequency table step is a multiple of mdiv to match
	 * vhint table granularity.
	 */
	freq_table_step_size = resp.mdiv *
			DIV_ROUND_UP(CPUFREQ_TBL_STEP_HZ, resp.ref_clk_hz);

	dev_dbg(&pdev->dev, "cluster %d: frequency table step size: %d\n",
		cluster_id, freq_table_step_size);

	delta_ndiv = resp.ndiv_max - resp.ndiv_min;

	if (unlikely(delta_ndiv == 0)) {
		num_freqs = 1;
	} else {
		/* We store both ndiv_min and ndiv_max hence the +1 */
		num_freqs = delta_ndiv / freq_table_step_size + 1;
	}

	num_freqs += (delta_ndiv % freq_table_step_size) ? 1 : 0;

	freq_table = devm_kcalloc(&pdev->dev, num_freqs + 1,
				  sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return ERR_PTR(-ENOMEM);

	for (index = 0, ndiv = resp.ndiv_min;
			ndiv < resp.ndiv_max;
			index++, ndiv += freq_table_step_size) {
		freq_table[index].driver_data = ndiv;
		freq_table[index].frequency = map_ndiv_to_freq(&resp, ndiv);
	}

	freq_table[index].driver_data = resp.ndiv_max;
	freq_table[index++].frequency = map_ndiv_to_freq(&resp, resp.ndiv_max);
	freq_table[index].frequency = CPUFREQ_TABLE_END;

	return freq_table;
}
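/*
 * Table sizing example with hypothetical BPMP limits: for
 * ref_clk_hz = 408000000, mdiv = 1, ndiv_min = 10 and ndiv_max = 80,
 * freq_table_step_size = 1 * DIV_ROUND_UP(50000000, 408000000) = 1 and
 * delta_ndiv = 70, so num_freqs = 70 / 1 + 1 = 71 with no extra
 * remainder entry; 72 slots are allocated so the last one can hold the
 * CPUFREQ_TABLE_END sentinel.
 */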

static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
	const struct tegra_cpufreq_soc *soc;
	struct tegra194_cpufreq_data *data;
	struct tegra_bpmp *bpmp;
	int err, i;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	soc = of_device_get_match_data(&pdev->dev);

	if (soc->ops && soc->maxcpus_per_cluster) {
		data->soc = soc;
	} else {
		dev_err(&pdev->dev, "soc data missing\n");
		return -EINVAL;
	}

	data->num_clusters = MAX_CLUSTERS;
	data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
				    sizeof(*data->tables), GFP_KERNEL);
	if (!data->tables)
		return -ENOMEM;

	if (soc->actmon_cntr_base) {
		/* MMIO registers are used for frequency requests and reconstruction */
		data->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(data->regs))
			return PTR_ERR(data->regs);
	}

	platform_set_drvdata(pdev, data);

	bpmp = tegra_bpmp_get(&pdev->dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
	if (!read_counters_wq) {
		dev_err(&pdev->dev, "failed to create workqueue\n");
		err = -EINVAL;
		goto put_bpmp;
	}

	for (i = 0; i < data->num_clusters; i++) {
		data->tables[i] = init_freq_table(pdev, bpmp, i);
		if (IS_ERR(data->tables[i])) {
			err = PTR_ERR(data->tables[i]);
			goto err_free_res;
		}
	}

	tegra194_cpufreq_driver.driver_data = data;

	err = cpufreq_register_driver(&tegra194_cpufreq_driver);
	if (!err)
		goto put_bpmp;

err_free_res:
	tegra194_cpufreq_free_resources();
put_bpmp:
	tegra_bpmp_put(bpmp);
	return err;
}

static int tegra194_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&tegra194_cpufreq_driver);
	tegra194_cpufreq_free_resources();

	return 0;
}

static const struct of_device_id tegra194_cpufreq_of_match[] = {
	{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
	{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
	{ /* sentinel */ }
};

static struct platform_driver tegra194_ccplex_driver = {
	.driver = {
		.name = "tegra194-cpufreq",
		.of_match_table = tegra194_cpufreq_of_match,
	},
	.probe = tegra194_cpufreq_probe,
	.remove = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);

MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra194 cpufreq driver");
MODULE_LICENSE("GPL v2");