cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpg.c (10995B)


/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 *  Copyright (C) 2010  Magnus Damm
 *  Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

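/*
 * Register accessors: CPG enable/divider registers may be 8, 16 or 32 bits
 * wide, as indicated by the CLK_ENABLE_REG_{8,16}BIT flags (32-bit access is
 * the default). All accesses go through the pre-mapped clk->mapped_reg.
 */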
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

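/*
 * MSTP (module stop) gates are active low: clearing the bit starts the module
 * clock. When a status register is provided, poll it (up to 1000 iterations)
 * until the corresponding stop-status bit clears, otherwise report a timeout.
 */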
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = ioread8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = ioread16;
		else
			read = ioread32;

		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

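/*
 * Attach the MSTP ops to an array of gate clocks and register them with the
 * clock framework; the loop stops at the first registration failure.
 */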
int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

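/*
 * Rebuild the frequency table from the div/mult table and return the rate
 * corresponding to the divisor index currently programmed in the register
 * (the field selected by div_mask at bit position enable_bit).
 */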
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

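/*
 * Look the requested rate up in the frequency table, program the matching
 * divisor index into the register field, then invoke the optional kick()
 * callback (the code below notes a post-change notifier would be preferable).
 */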
static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

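/*
 * div6 clocks must carry a valid divisor before CKSTP is cleared (the disable
 * path may have masked the field), so the cached rate is reprogrammed first,
 * then the clock-stop bit is cleared to start the clock.
 */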
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};

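/*
 * If no parent has been set up yet, read the parent-select field
 * (src_shift/src_width) from the register and reparent the clock to the
 * matching entry of parent_table.
 */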
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent table size failed\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

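/*
 * Shared registration helper for the divider clocks: allocate one
 * cpufreq_frequency_table per clock (nr_divs + 1 entries, the last one used
 * as the CPUFREQ_TABLE_END terminator), hook up the ops and div table, then
 * register each clock and resolve its parent.
 */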
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};

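/*
 * Select a new parent for a div6 clock: find its index in parent_table,
 * reparent, write the index into the src_shift/src_width register field and
 * rebuild the frequency table against the new parent.
 */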
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

/* FSI-DIV */
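/*
 * The FSI divider keeps its divide ratio in the upper 16 bits of the mapped
 * register; a ratio below 2 means the parent clock is passed through
 * unscaled. Bits 0 and 1 enable the divided output.
 */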
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value  = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};

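/*
 * Register the FSI divider clocks: the physical register address arrives in
 * the enable_reg field (filled in by SH_CLK_FSIDIV()), is moved into a
 * freshly allocated clk_mapping so the clock core can map it, and the
 * FSI-DIV ops are installed before registration.
 */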
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys		= (phys_addr_t)clks[i].enable_reg;
		map->len		= 8;

		clks[i].enable_reg	= 0; /* remove .enable_reg */
		clks[i].ops		= &fsidiv_clk_ops;
		clks[i].mapping		= map;

		clk_register(&clks[i]);
	}

	return 0;
}