cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

clk-rpm.c (17431B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE				0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID			0x2
#define QCOM_RPM_XO_MODE_ON				0x2

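/*
 * DEFINE_CLK_RPM() declares a pair of clocks that share one RPM resource:
 * "_name" votes in both the active and sleep sets, while "_active" is an
 * active-set-only vote. The two reference each other through ->peer so
 * that prepare/set_rate can aggregate both votes before talking to the
 * RPM. Both default to an INT_MAX rate and are children of "pxo_board".
 */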
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id)			      \
	static struct clk_rpm _platform##_##_active;			      \
	static struct clk_rpm _platform##_##_name = {			      \
		.rpm_clk_id = (r_id),					      \
		.peer = &_platform##_##_active,				      \
		.rate = INT_MAX,					      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_ops,				      \
			.name = #_name,					      \
			.parent_names = (const char *[]){ "pxo_board" },      \
			.num_parents = 1,				      \
		},							      \
	};								      \
	static struct clk_rpm _platform##_##_active = {			      \
		.rpm_clk_id = (r_id),					      \
		.peer = &_platform##_##_name,				      \
		.active_only = true,					      \
		.rate = INT_MAX,					      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_ops,				      \
			.name = #_active,				      \
			.parent_names = (const char *[]){ "pxo_board" },      \
			.num_parents = 1,				      \
		},							      \
	}

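/*
 * DEFINE_CLK_RPM_XO_BUFFER() declares one crystal-oscillator buffer clock.
 * All buffers sit behind the single QCOM_RPM_CXO_BUFFERS resource; each
 * buffer is switched by writing QCOM_RPM_XO_MODE_ON at its bit offset in
 * the shared request word (see clk_rpm_xo_prepare/unprepare).
 */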
#define DEFINE_CLK_RPM_XO_BUFFER(_platform, _name, _active, offset)	      \
	static struct clk_rpm _platform##_##_name = {			      \
		.rpm_clk_id = QCOM_RPM_CXO_BUFFERS,			      \
		.xo_offset = (offset),					      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_xo_ops,				      \
			.name = #_name,					      \
			.parent_names = (const char *[]){ "cxo_board" },      \
			.num_parents = 1,				      \
		},							      \
	}

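/*
 * DEFINE_CLK_RPM_FIXED() declares a fixed-rate RPM clock (e.g. PLL4) that
 * is only ever switched on or off; the rate reported back to the clk
 * framework is the constant passed in here.
 */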
#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r)	      \
	static struct clk_rpm _platform##_##_name = {			      \
		.rpm_clk_id = (r_id),					      \
		.rate = (r),						      \
		.hw.init = &(struct clk_init_data){			      \
			.ops = &clk_rpm_fixed_ops,			      \
			.name = #_name,					      \
			.parent_names = (const char *[]){ "pxo" },	      \
			.num_parents = 1,				      \
		},							      \
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct rpm_cc;

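/*
 * Per-clock state: the RPM resource id, the buffer bit offset (XO buffer
 * clocks only), whether this handle votes in the active set only or is a
 * simple on/off branch, the last rate and enable state seen from the clk
 * framework, the peer vote for the same resource, and handles back to the
 * RPM and to the clock controller.
 */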
struct clk_rpm {
	const int rpm_clk_id;
	const int xo_offset;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
	struct rpm_cc *rpm_cc;
};

struct rpm_cc {
	struct qcom_rpm *rpm;
	struct clk_rpm **clks;
	size_t num_clks;
	u32 xo_buffer_value;
	struct mutex xo_lock;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpm_clk_lock);

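/*
 * At probe time, vote for the maximum rate in both the active and the
 * sleep set so that clocks left running by the bootloader keep running
 * until clients have made their own requests. PLL4 and the CXO buffers
 * are skipped; the vendor tree only reads their status.
 */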
static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	/*
	 * The vendor tree simply reads the status for this
	 * RPM clock.
	 */
	if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
		r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
		return 0;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}

static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}

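/*
 * prepare/unprepare send the aggregated vote for this clock and its peer:
 * the active-set request is the max of both active rates, the sleep-set
 * request the max of both sleep rates (active-only clocks contribute 0).
 * Branch clocks only ever request 0 or 1. unprepare drops this clock's
 * contribution and leaves the peer's vote in place.
 */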
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active set vote and restore it */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}

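/*
 * The CXO buffers share one RPM resource. Each buffer's mode bits are set
 * or cleared in the cached request word under xo_lock, and the whole word
 * is written to the active set in one go.
 */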
static int clk_rpm_xo_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = true;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);

	return ret;
}

static void clk_rpm_xo_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = false;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);
}

static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 1;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = true;

	return ret;
}

static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 0;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = false;
}

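/*
 * Rate requests are only forwarded to the RPM (and cached in r->rate)
 * while the clock is prepared; as above, the vote that is sent is the
 * aggregate of this clock's and its peer's rates.
 */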
static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was set.
	 */
	return r->rate;
}

static const struct clk_ops clk_rpm_xo_ops = {
	.prepare	= clk_rpm_xo_prepare,
	.unprepare	= clk_rpm_xo_unprepare,
};

static const struct clk_ops clk_rpm_fixed_ops = {
	.prepare	= clk_rpm_fixed_prepare,
	.unprepare	= clk_rpm_fixed_unprepare,
	.round_rate	= clk_rpm_round_rate,
	.recalc_rate	= clk_rpm_recalc_rate,
};

static const struct clk_ops clk_rpm_ops = {
	.prepare	= clk_rpm_prepare,
	.unprepare	= clk_rpm_unprepare,
	.set_rate	= clk_rpm_set_rate,
	.round_rate	= clk_rpm_round_rate,
	.recalc_rate	= clk_rpm_recalc_rate,
};

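/*
 * Per-SoC clock declarations and lookup tables, indexed by the RPM_*
 * constants from <dt-bindings/clock/qcom,rpmcc.h>.
 */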
/* MSM8660/APQ8060 */
DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(msm8660, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(msm8660, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(msm8660, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(msm8660, smi_clk, smi_a_clk, QCOM_RPM_SMI_CLK);
DEFINE_CLK_RPM(msm8660, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM_FIXED(msm8660, pll4_clk, pll4_a_clk, QCOM_RPM_PLL_4, 540672000);

static struct clk_rpm *msm8660_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &msm8660_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &msm8660_afab_a_clk,
	[RPM_SYS_FABRIC_CLK] = &msm8660_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &msm8660_sfab_a_clk,
	[RPM_MM_FABRIC_CLK] = &msm8660_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &msm8660_mmfab_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &msm8660_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &msm8660_daytona_a_clk,
	[RPM_SFPB_CLK] = &msm8660_sfpb_clk,
	[RPM_SFPB_A_CLK] = &msm8660_sfpb_a_clk,
	[RPM_CFPB_CLK] = &msm8660_cfpb_clk,
	[RPM_CFPB_A_CLK] = &msm8660_cfpb_a_clk,
	[RPM_MMFPB_CLK] = &msm8660_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &msm8660_mmfpb_a_clk,
	[RPM_SMI_CLK] = &msm8660_smi_clk,
	[RPM_SMI_A_CLK] = &msm8660_smi_a_clk,
	[RPM_EBI1_CLK] = &msm8660_ebi1_clk,
	[RPM_EBI1_A_CLK] = &msm8660_ebi1_a_clk,
	[RPM_PLL4_CLK] = &msm8660_pll4_clk,
};

static const struct rpm_clk_desc rpm_clk_msm8660 = {
	.clks = msm8660_clks,
	.num_clks = ARRAY_SIZE(msm8660_clks),
};

/* apq8064 */
DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d0_clk, xo_d0_a_clk, 0);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_d1_clk, xo_d1_a_clk, 8);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a0_clk, xo_a0_a_clk, 16);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a1_clk, xo_a1_a_clk, 24);
DEFINE_CLK_RPM_XO_BUFFER(apq8064, xo_a2_clk, xo_a2_a_clk, 28);

static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
	[RPM_CFPB_CLK] = &apq8064_cfpb_clk,
	[RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
	[RPM_EBI1_CLK] = &apq8064_ebi1_clk,
	[RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
	[RPM_SFPB_CLK] = &apq8064_sfpb_clk,
	[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
	[RPM_QDSS_CLK] = &apq8064_qdss_clk,
	[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
	[RPM_XO_D0] = &apq8064_xo_d0_clk,
	[RPM_XO_D1] = &apq8064_xo_d1_clk,
	[RPM_XO_A0] = &apq8064_xo_a0_clk,
	[RPM_XO_A1] = &apq8064_xo_a1_clk,
	[RPM_XO_A2] = &apq8064_xo_a2_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

/* ipq806x */
DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK);
DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK);

static struct clk_rpm *ipq806x_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk,
	[RPM_CFPB_CLK] = &ipq806x_cfpb_clk,
	[RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk,
	[RPM_EBI1_CLK] = &ipq806x_ebi1_clk,
	[RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk,
	[RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk,
	[RPM_SFPB_CLK] = &ipq806x_sfpb_clk,
	[RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk,
	[RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk,
	[RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk,
	[RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk,
	[RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk,
};

static const struct rpm_clk_desc rpm_clk_ipq806x = {
	.clks = ipq806x_clks,
	.num_clks = ARRAY_SIZE(ipq806x_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);

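/*
 * of_clk provider callback: translate the consumer's clock-cell index
 * into the matching clk_hw, or return an error for out-of-range or
 * unpopulated slots.
 */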
static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					  void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}

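/*
 * Probe: fetch the qcom_rpm handle from the parent MFD, look up the
 * per-SoC clock table, perform the handoff vote for every clock, register
 * each clk_hw and finally expose them through an of_clk provider.
 */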
static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;
	mutex_init(&rcc->xo_lock);

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;
		rpm_clks[i]->rpm_cc = rcc;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
				     rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static int rpm_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
	.remove = rpm_clk_remove,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");