cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ccu-div.c (15288B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
 *
 * Baikal-T1 CCU Dividers interface driver
 */

#define pr_fmt(fmt) "bt1-ccu-div: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/debugfs.h>

#include "ccu-div.h"

#define CCU_DIV_CTL			0x00
#define CCU_DIV_CTL_EN			BIT(0)
#define CCU_DIV_CTL_RST			BIT(1)
#define CCU_DIV_CTL_SET_CLKDIV		BIT(2)
#define CCU_DIV_CTL_CLKDIV_FLD		4
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)
#define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)

#define CCU_DIV_RST_DELAY_US		1
#define CCU_DIV_LOCK_CHECK_RETRIES	50

#define CCU_DIV_CLKDIV_MIN		0
#define CCU_DIV_CLKDIV_MAX(_mask) \
	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
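
/*
 * Worked example (illustrative, not from the original source): for a
 * CLKDIV field of width 4 the mask macro yields GENMASK(7, 4) = 0xf0,
 * so CCU_DIV_CLKDIV_MAX(0xf0) = 0xf, i.e. the divider is programmed in
 * bits [7:4] and ranges from 0 to 15.
 */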

/*
 * Use the next two helpers until generic field setter and getter with
 * non-constant mask support are available.
 */
static inline u32 ccu_div_get(u32 mask, u32 val)
{
	return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
}

static inline u32 ccu_div_prep(u32 mask, u32 val)
{
	return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
}
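
/*
 * Illustrative round trip (values are assumptions, not taken from the
 * hardware): with mask = 0xf0, ccu_div_prep(0xf0, 0x5) = 0x50 and
 * ccu_div_get(0xf0, 0x50) = 0x5, i.e. prep() packs a divider value into
 * the CLKDIV field and get() extracts it back.
 */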

static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
						  unsigned long div)
{
	u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;

	do_div(ns, ref_clk);

	return ns;
}
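
/*
 * Worked example (illustrative figures): with a 25 MHz reference clock
 * and a divider of 10, ns = 4 * 10 * NSEC_PER_SEC / 25000000 = 1600,
 * i.e. the lock bit is given four divided-clock periods (~1.6 us here)
 * to settle between polls.
 */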

static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	return ref_clk / (div ?: 1);
}

static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{
	unsigned long nd;
	u32 val = 0;
	u32 lock;
	int count;

	nd = ccu_div_lock_delay_ns(parent_rate, divider);

	if (div->features & CCU_DIV_LOCK_SHIFTED)
		lock = CCU_DIV_CTL_LOCK_SHIFTED;
	else
		lock = CCU_DIV_CTL_LOCK_NORMAL;

	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);

	/*
	 * Until an nsec-version of readl_poll_timeout() is available we
	 * have to implement the polling loop below ourselves.
	 */
	count = CCU_DIV_LOCK_CHECK_RETRIES;
	do {
		ndelay(nd);
		regmap_read(div->sys_regs, div->reg_ctl, &val);
		if (val & lock)
			return 0;
	} while (--count);

	return -ETIMEDOUT;
}
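
/*
 * Illustrative worst case (derived from the constants above): with the
 * 25 MHz / divider-of-10 figures used earlier, the loop polls the lock
 * bit every ~1.6 us for CCU_DIV_LOCK_CHECK_RETRIES (50) iterations,
 * giving up after roughly 80 us.
 */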

static int ccu_div_var_enable(struct clk_hw *hw)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;
	u32 val = 0;
	int ret;

	if (!parent_hw) {
		pr_err("Can't enable '%s' with no parent\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	if (val & CCU_DIV_CTL_EN)
		return 0;

	spin_lock_irqsave(&div->lock, flags);
	ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
					ccu_div_get(div->mask, val));
	if (!ret)
		regmap_update_bits(div->sys_regs, div->reg_ctl,
				   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

static int ccu_div_gate_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_gate_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_gate_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !!(val & CCU_DIV_CTL_EN);
}

static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	divider = ccu_div_get(div->mask, val);

	return ccu_div_calc_freq(parent_rate, divider);
}

static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
						     unsigned long parent_rate,
						     unsigned int mask)
{
	unsigned long divider;

	divider = parent_rate / rate;
	return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
		       CCU_DIV_CLKDIV_MAX(mask));
}
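
/*
 * Worked example (illustrative rates): for parent_rate = 600 MHz,
 * rate = 100 MHz and a 4-bit mask (0xf0), divider = 600 / 100 = 6,
 * which already lies within [CCU_DIV_CLKDIV_MIN, 0xf] and is used
 * as-is; a request of 10 MHz would instead be clamped to the maximum
 * divider of 15.
 */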

static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;

	divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);

	return ccu_div_calc_freq(*parent_rate, divider);
}

/*
 * This method is used for the clock divider blocks which support
 * on-the-fly rate changes. Since these blocks lack the EN-bit
 * functionality, they can't be gated before the rate adjustment.
 */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;
	int ret;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
		divider = 0;
	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
		if (divider == 1 || divider == 2)
			divider = 0;
		else if (divider == 3)
			divider = 4;
	}
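
	/*
	 * Note (interpretation, not from the original comments): a CLKDIV
	 * field value of 0 is treated as "divide by one" throughout this
	 * driver (see the "div ?: 1" in ccu_div_calc_freq()), so the
	 * skip-ranges above presumably round dividers the hardware can't
	 * produce down to 0 (pass-through) or up to 4.
	 */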

	val = ccu_div_prep(div->mask, divider);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

/*
 * This method is used for the clock divider blocks which don't support
 * on-the-fly rate changes.
 */
static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	val = ccu_div_prep(div->mask, divider);

	/*
	 * Also disable the clock divider block if it was enabled by default
	 * or by the bootloader.
	 */
	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   div->mask | CCU_DIV_CTL_EN, val);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(parent_rate, div->divider);
}

static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(*parent_rate, div->divider);
}

static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}

int ccu_div_reset_domain(struct ccu_div *div)
{
	unsigned long flags;

	if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
		return -EINVAL;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
	spin_unlock_irqrestore(&div->lock, flags);

	/* The delay below must be long enough to cover all the resets. */
	udelay(CCU_DIV_RST_DELAY_US);

	return 0;
}
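
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * reset controller built on top of a divider registered with the
 * CCU_DIV_RESET_DOMAIN feature could pulse the domain reset like so:
 *
 *	int err = ccu_div_reset_domain(div);
 *	if (err)
 *		pr_err("Failed to reset the domain\n");
 */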

#ifdef CONFIG_DEBUG_FS

struct ccu_div_dbgfs_bit {
	struct ccu_div *div;
	const char *name;
	u32 mask;
};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) {	\
		.name = _name,			\
		.mask = _mask			\
	}

static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)

/*
 * It can be dangerous to change the divider settings behind the clock
 * framework's back, so we don't provide any Kconfig-based compile-time
 * option to enable this feature.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	data = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

#define ccu_div_dbgfs_mode		0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

#define ccu_div_dbgfs_bit_set		NULL
#define ccu_div_dbgfs_var_clkdiv_set	NULL
#define ccu_div_dbgfs_mode		0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = !!(data & bit->mask);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
	ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");

static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = ccu_div_get(div->mask, data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
	ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");

static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;

	*val = div->divider;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
	ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");

static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bits;
	int didx, bidx, num = 2;
	const char *name;

	num += !!(div->flags & CLK_SET_RATE_GATE) +
		!!(div->features & CCU_DIV_RESET_DOMAIN);

	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
		name = ccu_div_bits[bidx].name;
		if (!(div->flags & CLK_SET_RATE_GATE) &&
		    !strcmp("div_en", name)) {
			continue;
		}

		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
		    !strcmp("div_rst", name)) {
			continue;
		}

		bits[didx] = ccu_div_bits[bidx];
		bits[didx].div = div;

		if (div->features & CCU_DIV_LOCK_SHIFTED &&
		    !strcmp("div_lock", name)) {
			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
		}

		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
					   dentry, &bits[didx],
					   &ccu_div_dbgfs_bit_fops);
		++didx;
	}

	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
				   div, &ccu_div_dbgfs_var_clkdiv_fops);
}

static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[0];
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

#else /* !CONFIG_DEBUG_FS */

#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
#define ccu_div_fixed_debug_init NULL

#endif /* !CONFIG_DEBUG_FS */

static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};

static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};

struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_div *div;
	int ret;

	if (!div_init)
		return ERR_PTR(-EINVAL);

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note: since the Baikal-T1 System Controller registers are
	 * MMIO-backed, we don't check the return status of the regmap IO
	 * operations, as it must be zero anyway.
	 */
	div->hw.init = &hw_init;
	div->id = div_init->id;
	div->reg_ctl = div_init->base + CCU_DIV_CTL;
	div->sys_regs = div_init->sys_regs;
	div->flags = div_init->flags;
	div->features = div_init->features;
	spin_lock_init(&div->lock);

	hw_init.name = div_init->name;
	hw_init.flags = div_init->flags;

	if (div_init->type == CCU_DIV_VAR) {
		if (hw_init.flags & CLK_SET_RATE_GATE)
			hw_init.ops = &ccu_div_var_gate_to_set_ops;
		else
			hw_init.ops = &ccu_div_var_nogate_ops;
		div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
	} else if (div_init->type == CCU_DIV_GATE) {
		hw_init.ops = &ccu_div_gate_ops;
		div->divider = div_init->divider;
	} else if (div_init->type == CCU_DIV_FIXED) {
		hw_init.ops = &ccu_div_fixed_ops;
		div->divider = div_init->divider;
	} else {
		ret = -EINVAL;
		goto err_free_div;
	}

	if (!div_init->parent_name) {
		ret = -EINVAL;
		goto err_free_div;
	}
	parent_data.fw_name = div_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(div_init->np, &div->hw);
	if (ret)
		goto err_free_div;

	return div;

err_free_div:
	kfree(div);

	return ERR_PTR(ret);
}
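
/*
 * Minimal registration sketch (illustrative; the field values and the
 * surrounding driver context are assumptions, only the ccu_div_init_data
 * fields themselves come from this file):
 *
 *	static struct ccu_div *example_register(struct device_node *np,
 *						struct regmap *sys_regs)
 *	{
 *		const struct ccu_div_init_data init = {
 *			.id = 1,
 *			.name = "example_div",
 *			.parent_name = "ref_clk",
 *			.np = np,
 *			.sys_regs = sys_regs,
 *			.base = 0x020,
 *			.type = CCU_DIV_VAR,
 *			.width = 4,
 *			.flags = CLK_SET_RATE_GATE,
 *		};
 *
 *		return ccu_div_hw_register(&init);
 *	}
 *
 * The caller is expected to check the result with IS_ERR() and undo the
 * registration with ccu_div_hw_unregister() on the teardown path.
 */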

void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}