cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

clk-cgu.c (15695B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

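/*
 * Each hardware gate appears to expose a three-register window: a
 * status register at offset +0x0, a write-1-to-enable register at
 * +0x4 and a write-1-to-disable register at +0x8 (see the gate ops
 * below). MAX_DDIV_REG bounds a single dual-divider field and
 * MAX_DIVIDER_VAL caps the combined divider value.
 */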
#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL 64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

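/*
 * Register a fixed-rate clock, optionally seeding a divider field first
 * when CLOCK_FLAG_VAL_INIT is set. Note that the shared branch
 * descriptor appears to reuse the mux_flags field to carry the fixed
 * rate in Hz, which is why mux_flags is passed as the rate argument.
 */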
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	unsigned long flags;

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

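/*
 * Mux ops. When MUX_CLK_SW is set, the mux is software-emulated: the
 * selected parent index is cached in mux->reg rather than read from or
 * written to hardware. Otherwise mux->reg is a register offset accessed
 * through lgm_get_clk_val()/lgm_set_clk_val().
 */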
static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		val = mux->reg;
	else
		val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	spin_unlock_irqrestore(&mux->lock, flags);
	return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 val;

	val = clk_mux_index_to_val(NULL, mux->flags, index);
	spin_lock_irqsave(&mux->lock, flags);
	if (mux->flags & MUX_CLK_SW)
		mux->reg = val;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, val);
	spin_unlock_irqrestore(&mux->lock, flags);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->mux_flags;
	struct device *dev = ctx->dev;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	u32 reg = list->mux_off;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->lock = ctx->lock;
	mux->reg = reg;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&mux->lock, flags);
		lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
		spin_unlock_irqrestore(&mux->lock, flags);
	}

	return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&divider->lock, flags);
	val = lgm_get_clk_val(divider->membase, divider->reg,
			      divider->shift, divider->width);
	spin_unlock_irqrestore(&divider->lock, flags);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;
	int value;

	value = divider_get_val(rate, prate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(&divider->lock, flags);
	lgm_set_clk_val(divider->membase, divider->reg,
			divider->shift, divider->width, value);
	spin_unlock_irqrestore(&divider->lock, flags);

	return 0;
}

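/*
 * Some dividers carry their own gate bit in the same register; the
 * shift_gate/width_gate fields locate it, so enabling or disabling the
 * divider is just a 1 or 0 written into that field.
 */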
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
			div->width_gate, enable);
	spin_unlock_irqrestore(&div->lock, flags);
	return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->div_flags;
	struct device *dev = ctx->dev;
	struct lgm_clk_divider *div;
	struct clk_init_data init = {};
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u8 shift_gate = list->div_shift_gate;
	u8 width_gate = list->div_width_gate;
	u32 reg = list->div_off;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->lock = ctx->lock;
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->shift_gate	= shift_gate;
	div->width_gate	= width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&div->lock, flags);
		lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
		spin_unlock_irqrestore(&div->lock, flags);
	}

	return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	unsigned long flags;
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name, list->flags,
					  list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return hw;
}

static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int reg, ret;
	unsigned long flags;

	spin_lock_irqsave(&gate->lock, flags);
	reg = GATE_HW_REG_STAT(gate->reg);
	ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	struct device *dev = ctx->dev;
	u8 shift = list->gate_shift;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	u32 reg = list->gate_off;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->lock = ctx->lock;
	gate->reg = reg;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&gate->lock, flags);
		lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
		spin_unlock_irqrestore(&gate->lock, flags);
	}

	return hw;
}

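/*
 * Walk a table of branch descriptors, register each clock according to
 * its type and record the resulting clk_hw in ctx->clk_data.hws at the
 * entry's id so the provider can later hand it out by index.
 */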
int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			hw = lgm_clk_register_gate(ctx, list);
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			return -EIO;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

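/*
 * Dual-divider (ddiv) clock: the output rate is parent / (div0 * div1),
 * where div0 and div1 are the two register fields plus one. When the
 * extra "predivide" bit (shift2) is set, the result is additionally
 * scaled by mult/div, fixed to 2/5 at registration time, i.e. an extra
 * divide-by-2.5.
 */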
static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	u64 prate;

	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	prate = (u64)parent_rate;
	do_div(prate, div0);
	do_div(prate, div1);

	if (exdiv) {
		do_div(prate, ddiv->div);
		prate *= ddiv->mult;
	}

	return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);
	return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
	spin_unlock_irqrestore(&ddiv->lock, flags);
}

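/*
 * Split a requested divider into two hardware factors ddiv1 * ddiv2,
 * each of which must fit in a field (1..MAX_DDIV_REG). The loop picks
 * the smallest second factor idx that divides div evenly with a
 * quotient that also fits. For example, div = 24: idx = 2 yields 12
 * (too large), idx = 3 yields 8, so ddiv1 = 8 and ddiv2 = 3. Returns
 * -EINVAL when div cannot be factored this way (e.g. a prime > 8).
 */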
static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = DIV_ROUND_UP_ULL((u64)div, idx);
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}

		if (idx > MAX_DDIV_REG)
			return -EINVAL;

		*ddiv1 = temp;
		*ddiv2 = idx;
	}

	return 0;
}

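/*
 * Program the two divider fields for the requested rate. If the 2.5
 * predivider bit is already set in hardware, the ideal divider is
 * rescaled by 2/5 first so that div0 * div1 * 2.5 matches the request.
 * Note that div is unsigned, so the "div <= 0" check only rejects zero.
 */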
static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
		div = div * 2;
	}

	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg,  ddiv->shift1, ddiv->width1,
			ddiv2 - 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return 0;
}

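/*
 * Round to the nearest achievable rate: compute the ideal divider,
 * rescale it by 2/5 when the predivider is active, and if it cannot be
 * factored into two in-range fields, retry with div + 1 before giving
 * up. The quotient is then scaled by 2/5 again so the reported rate
 * matches what recalc_rate would return.
 */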
static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;
	u64 rate64;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	if (div <= 0)
		return *prate;

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
			return -EINVAL;

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable	= lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

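/*
 * Register a table of dual-divider clocks. mult/div are fixed to 2 and
 * 5 here, encoding the divide-by-2.5 that applies whenever the
 * predivider bit (ex_shift/ex_width) is set.
 */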
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		struct clk_init_data init = {};
		struct lgm_clk_ddiv *ddiv;

		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->lock = ctx->lock;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = devm_clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n", list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}