cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

clk-scu.c (21116B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018-2021 NXP
 *   Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/bsearch.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "clk-scu.h"

#define IMX_SIP_CPUFREQ			0xC2000001
#define IMX_SIP_SET_CPUFREQ		0x00

static struct imx_sc_ipc *ccm_ipc_handle;
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
static const struct imx_clk_scu_rsrc_table *rsrc_table;

struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

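/*
 * Per-resource lists of registered SCU clocks. When the provider uses the
 * two-cell binding set up in imx_clk_scu_init(), imx_scu_of_clk_src_get()
 * resolves a clock by indexing this array with the resource ID and walking
 * the list for a matching clock type.
 */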
struct list_head imx_scu_clks[IMX_SC_R_LAST];

/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save&restore */
	struct clk_hw *parent;
	u8 parent_index;
	bool is_enabled;
	u32 rate;
};

/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 */
struct clk_gpr_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 gpr_id;
	u8 flags;
	bool gate_invert;
};

#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)

/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol for setting a clock rate.
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct resp_get_clock_rate {
	__le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol for reading a clock rate.
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol for reading a clock's parent.
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @req: set parent request protocol
 *
 * This structure describes the SCU protocol for setting a clock's parent.
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether to enable the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol for gating a clock.
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);

static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
	return container_of(hw, struct clk_scu, hw);
}

static inline int imx_scu_clk_search_cmp(const void *rsrc, const void *rsrc_p)
{
	return *(u32 *)rsrc - *(u32 *)rsrc_p;
}

static bool imx_scu_clk_is_valid(u32 rsrc_id)
{
	void *p;

	if (!rsrc_table)
		return true;

	p = bsearch(&rsrc_id, rsrc_table->rsrc, rsrc_table->num,
		    sizeof(rsrc_table->rsrc[0]), imx_scu_clk_search_cmp);

	return p != NULL;
}

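/*
 * Obtain the SCU IPC handle and register the SCU clock platform driver.
 * For providers with #clock-cells = <2>, also initialise the per-resource
 * clock lists, look up the "fsl,scu-pd" power-domain node used later when
 * attaching clock devices to their power domains, and record the optional
 * resource table consulted by imx_scu_clk_is_valid().
 */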
int imx_clk_scu_init(struct device_node *np,
		     const struct imx_clk_scu_rsrc_table *data)
{
	u32 clk_cells;
	int ret, i;

	ret = imx_scu_get_handle(&ccm_ipc_handle);
	if (ret)
		return ret;

	ret = of_property_read_u32(np, "#clock-cells", &clk_cells);
	if (ret)
		return ret;

	if (clk_cells == 2) {
		for (i = 0; i < IMX_SC_R_LAST; i++)
			INIT_LIST_HEAD(&imx_scu_clks[i]);

		/* pd_np will be used to attach power domains later */
		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
		if (!pd_np)
			return -EINVAL;

		rsrc_table = data;
	}

	return platform_driver_register(&imx_clk_scu_driver);
}

/*
 * clk_scu_recalc_rate - Get clock rate for a SCU clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a SCU clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock rate %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	return le32_to_cpu(msg.data.resp.rate);
}

/*
 * clk_scu_round_rate - Round clock rate for a SCU clock
 * @hw: clock to round rate for
 * @rate: rate to round
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Returns the rounded rate. The requested rate is passed through unchanged;
 * the SCU firmware performs the actual rounding.
 */
static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * Assume every requested rate is supported and let the SCU
	 * firmware handle the rest.
	 */
	return rate;
}

static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct arm_smccc_res res;
	unsigned long cluster_id;

	if (clk->rsrc_id == IMX_SC_R_A35 || clk->rsrc_id == IMX_SC_R_A53)
		cluster_id = 0;
	else if (clk->rsrc_id == IMX_SC_R_A72)
		cluster_id = 1;
	else
		return -EINVAL;

	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
		      cluster_id, rate, 0, 0, 0, 0, &res);

	return 0;
}

/*
 * clk_scu_set_rate - Set rate for a SCU clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for SCU clocks
 *
 * Sets a clock frequency for a SCU clock. Returns the SCU
 * protocol status.
 */
static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_req_set_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
	hdr->size = 3;

	msg.rate = cpu_to_le32(rate);
	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

static u8 clk_scu_get_parent(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	clk->parent_index = msg.data.resp.parent;

	return msg.data.resp.parent;
}

static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_set_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
	hdr->size = 2;

	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;
	msg.parent = index;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to set clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return ret;
	}

	clk->parent_index = index;

	return 0;
}

static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
			      u8 clk, bool enable, bool autog)
{
	struct imx_sc_msg_req_clock_enable msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
	hdr->size = 3;

	msg.resource = cpu_to_le16(resource);
	msg.clk = clk;
	msg.enable = enable;
	msg.autog = autog;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

/*
 * clk_scu_prepare - Enable a SCU clock
 * @hw: clock to enable
 *
 * Enable the clock at the DSC slice level
 */
static int clk_scu_prepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);

	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				  clk->clk_type, true, false);
}

/*
 * clk_scu_unprepare - Disable a SCU clock
 * @hw: clock to disable
 *
 * Disable the clock at the DSC slice level
 */
static void clk_scu_unprepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	int ret;

	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				 clk->clk_type, false, false);
	if (ret)
		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
			ret);
}

static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_pi_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate  = clk_scu_round_rate,
	.set_rate    = clk_scu_set_rate,
};

struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
			     const char * const *parents, int num_parents,
			     u32 rsrc_id, u8 clk_type)
{
	struct clk_init_data init;
	struct clk_scu *clk;
	struct clk_hw *hw;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->rsrc_id = rsrc_id;
	clk->clk_type = clk_type;

	init.name = name;
	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || rsrc_id == IMX_SC_R_A72)
		init.ops = &clk_scu_cpu_ops;
	else if (rsrc_id == IMX_SC_R_PI_0_PLL)
		init.ops = &clk_scu_pi_ops;
	else
		init.ops = &clk_scu_ops;
	init.parent_names = parents;
	init.num_parents = num_parents;

	/*
	 * Note: on MX8 the clocks are tightly coupled with their power
	 * domain; once the power domain is off, the clock state may be
	 * lost. Mark the clock NOCACHE so users retrieve the real clock
	 * state from the hardware instead of a possibly stale cached rate.
	 */
	init.flags = CLK_GET_RATE_NOCACHE;
	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(clk);
		hw = ERR_PTR(ret);
		return hw;
	}

	if (dev)
		dev_set_drvdata(dev, clk);

	return hw;
}

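/*
 * of_clk provider callback for the two-cell binding: clkspec->args[0] is
 * the SCU resource ID and clkspec->args[1] the clock type. Returns the
 * matching clk_hw registered on that resource's list, or ERR_PTR(-ENODEV).
 */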
struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
				      void *data)
{
	unsigned int rsrc = clkspec->args[0];
	unsigned int idx = clkspec->args[1];
	struct list_head *scu_clks = data;
	struct imx_scu_clk_node *clk;

	list_for_each_entry(clk, &scu_clks[rsrc], node) {
		if (clk->clk_type == idx)
			return clk->hw;
	}

	return ERR_PTR(-ENODEV);
}

static int imx_clk_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
	struct clk_hw *hw;
	int ret;

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	    (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_set_suspended(dev);
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(dev);

		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			pm_genpd_remove_device(dev);
			pm_runtime_disable(dev);
			return ret;
		}
	}

	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
			   clk->rsrc, clk->clk_type);
	if (IS_ERR(hw)) {
		pm_runtime_disable(dev);
		return PTR_ERR(hw);
	}

	clk->hw = hw;
	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	    (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);
	}

	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
		clk->clk_type);

	return 0;
}

static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	u32 rsrc_id = clk->rsrc_id;

	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
	    (rsrc_id == IMX_SC_R_A72))
		return 0;

	clk->parent = clk_hw_get_parent(&clk->hw);

	/* DC SS needs to handle bypass clock using non-cached clock rate */
	if (clk->rsrc_id == IMX_SC_R_DC_0_VIDEO0 ||
		clk->rsrc_id == IMX_SC_R_DC_0_VIDEO1 ||
		clk->rsrc_id == IMX_SC_R_DC_1_VIDEO0 ||
		clk->rsrc_id == IMX_SC_R_DC_1_VIDEO1)
		clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
	else
		clk->rate = clk_hw_get_rate(&clk->hw);
	clk->is_enabled = clk_hw_is_enabled(&clk->hw);

	if (clk->parent)
		dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
			clk->parent_index);

	if (clk->rate)
		dev_dbg(dev, "save rate %d\n", clk->rate);

	if (clk->is_enabled)
		dev_dbg(dev, "save enabled state\n");

	return 0;
}

static int __maybe_unused imx_clk_scu_resume(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	u32 rsrc_id = clk->rsrc_id;
	int ret = 0;

	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
	    (rsrc_id == IMX_SC_R_A72))
		return 0;

	if (clk->parent) {
		ret = clk_scu_set_parent(&clk->hw, clk->parent_index);
		dev_dbg(dev, "restore parent %s idx %u %s\n",
			clk_hw_get_name(clk->parent),
			clk->parent_index, !ret ? "success" : "failed");
	}

	if (clk->rate) {
		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
		dev_dbg(dev, "restore rate %d %s\n", clk->rate,
			!ret ? "success" : "failed");
	}

	if (clk->is_enabled && rsrc_id != IMX_SC_R_PI_0_PLL) {
		ret = clk_scu_prepare(&clk->hw);
		dev_dbg(dev, "restore enabled state %s\n",
			!ret ? "success" : "failed");
	}

	return ret;
}

static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};

static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
{
	struct of_phandle_args genpdspec = {
		.np = pd_np,
		.args_count = 1,
		.args[0] = rsrc_id,
	};

	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
	    rsrc_id == IMX_SC_R_A72)
		return 0;

	return of_genpd_add_device(&genpdspec, dev);
}

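/*
 * Create a platform device for one SCU clock so that it can be bound to
 * imx_clk_scu_driver, attached to its SCU power domain and managed with
 * runtime PM; the clk_hw itself is registered later in imx_clk_scu_probe().
 */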
struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
				     const char * const *parents,
				     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct imx_scu_clk_node clk = {
		.name = name,
		.rsrc = rsrc_id,
		.clk_type = clk_type,
		.parents = parents,
		.num_parents = num_parents,
	};
	struct platform_device *pdev;
	int ret;

	if (!imx_scu_clk_is_valid(rsrc_id))
		return ERR_PTR(-EINVAL);

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
		       name, rsrc_id, clk_type);
		return ERR_PTR(-ENOMEM);
	}

	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	ret = driver_set_override(&pdev->dev, &pdev->driver_override,
				  "imx-scu-clk", strlen("imx-scu-clk"));
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
	if (ret)
		pr_warn("%s: failed to attach the power domain %d\n",
			name, ret);

	platform_device_add(pdev);

	/* For API backwards compatibility, simply return NULL for success */
	return NULL;
}

void imx_clk_scu_unregister(void)
{
	struct imx_scu_clk_node *clk, *n;
	int i;

	for (i = 0; i < IMX_SC_R_LAST; i++) {
		list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
			clk_hw_unregister(clk->hw);
			kfree(clk);
		}
	}
}

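/*
 * GPR clocks are controlled through the SCU MISC control interface
 * (imx_sc_misc_get_control/imx_sc_misc_set_control) on a per-resource GPR
 * field: a gate bit, a divide-by-2 selector or a parent mux, depending on
 * the flags passed to __imx_clk_gpr_scu().
 */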
static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	unsigned long rate = 0;
	u32 val;
	int err;

	err = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, &val);

	rate = val ? parent_rate / 2 : parent_rate;

	return err ? 0 : rate;
}

static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	if (rate < *prate)
		rate = *prate / 2;
	else
		rate = *prate;

	return rate;
}

static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	uint32_t val;
	int err;

	val = (rate < parent_rate) ? 1 : 0;
	err = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, val);

	return err ? -EINVAL : 0;
}

static const struct clk_ops clk_gpr_div_scu_ops = {
	.recalc_rate = clk_gpr_div_scu_recalc_rate,
	.round_rate = clk_gpr_div_scu_round_rate,
	.set_rate = clk_gpr_div_scu_set_rate,
};

static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	u32 val = 0;

	imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				clk->gpr_id, &val);

	return (u8)val;
}

static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, index);
}

static const struct clk_ops clk_gpr_mux_scu_ops = {
	.get_parent = clk_gpr_mux_scu_get_parent,
	.set_parent = clk_gpr_mux_scu_set_parent,
};

static int clk_gpr_gate_scu_prepare(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, !clk->gate_invert);
}

static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	int ret;

	ret = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, clk->gate_invert);
	if (ret)
		pr_err("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
		       ret);
}

static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	int ret;
	u32 val;

	ret = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				      clk->gpr_id, &val);
	if (ret)
		return ret;

	return clk->gate_invert ? !val : val;
}

static const struct clk_ops clk_gpr_gate_scu_ops = {
	.prepare = clk_gpr_gate_scu_prepare,
	.unprepare = clk_gpr_gate_scu_unprepare,
	.is_prepared = clk_gpr_gate_scu_is_prepared,
};

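/*
 * Register one SCU GPR clock. The flags select which of the gate, divider
 * or mux ops above are used; @invert flips the gate polarity. A bookkeeping
 * node is added to imx_scu_clks[] so the clock can be resolved by
 * imx_scu_of_clk_src_get() and freed by imx_clk_scu_unregister().
 */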
struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name,
				 int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags,
				 bool invert)
{
	struct imx_scu_clk_node *clk_node;
	struct clk_gpr_scu *clk;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST)
		return ERR_PTR(-EINVAL);

	clk_node = kzalloc(sizeof(*clk_node), GFP_KERNEL);
	if (!clk_node)
		return ERR_PTR(-ENOMEM);

	if (!imx_scu_clk_is_valid(rsrc_id)) {
		kfree(clk_node);
		return ERR_PTR(-EINVAL);
	}

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		kfree(clk_node);
		return ERR_PTR(-ENOMEM);
	}

	clk->rsrc_id = rsrc_id;
	clk->gpr_id = gpr_id;
	clk->flags = flags;
	clk->gate_invert = invert;

	if (flags & IMX_SCU_GPR_CLK_GATE)
		init.ops = &clk_gpr_gate_scu_ops;

	if (flags & IMX_SCU_GPR_CLK_DIV)
		init.ops = &clk_gpr_div_scu_ops;

	if (flags & IMX_SCU_GPR_CLK_MUX)
		init.ops = &clk_gpr_mux_scu_ops;

	init.flags = 0;
	init.name = name;
	init.parent_names = parent_name;
	init.num_parents = num_parents;

	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(clk);
		kfree(clk_node);
		hw = ERR_PTR(ret);
	} else {
		clk_node->hw = hw;
		clk_node->clk_type = gpr_id;
		list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
	}

	return hw;
}