cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

clk-mix.c (12081B)


/*
 * mmp mix(div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

#include "clk.h"

/*
 * The mix clock is a clock that combines a mux and a divider.
 * Because the div field and the mux field need to be set at the
 * same time, we cannot split it into two separate clock types.
 */

#define to_clk_mix(hw)	container_of(hw, struct mmp_clk_mix, hw)

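/* Return the largest divider supported by the div flags or div table. */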
static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
{
	unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
	unsigned int maxdiv = 0;
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div_mask;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << div_mask;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div > maxdiv)
				maxdiv = clkt->div;
		return maxdiv;
	}
	return div_mask + 1;
}

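/* Translate a raw divider field value into the effective divider. */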
static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->val == val)
				return clkt->div;
		if (clkt->div == 0)
			return 0;
	}
	return val + 1;
}

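/* Translate a raw mux field value into a parent index. */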
static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(&mix->hw);
	int i;

	if (mix->mux_flags & CLK_MUX_INDEX_BIT)
		return ffs(val) - 1;
	if (mix->mux_flags & CLK_MUX_INDEX_ONE)
		return val - 1;
	if (mix->mux_table) {
		for (i = 0; i < num_parents; i++)
			if (mix->mux_table[i] == val)
				return i;
		if (i == num_parents)
			return 0;
	}

	return val;
}

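/* Translate a divider into the raw value for the divider field. */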
static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div == div)
				return clkt->val;
		if (clkt->div == 0)
			return 0;
	}

	return div - 1;
}

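/* Translate a parent index into the raw value for the mux field. */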
static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
{
	if (mix->mux_table)
		return mix->mux_table[mux];

	return mux;
}

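/*
 * Mark each table entry as valid only if its rate can be derived from the
 * parent rate with an integer divider, and cache that divider in the entry.
 */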
static void _filter_clk_table(struct mmp_clk_mix *mix,
				struct mmp_clk_mix_clk_table *table,
				unsigned int table_size)
{
	int i;
	struct mmp_clk_mix_clk_table *item;
	struct clk_hw *parent, *hw;
	unsigned long parent_rate;

	hw = &mix->hw;

	for (i = 0; i < table_size; i++) {
		item = &table[i];
		parent = clk_hw_get_parent_by_index(hw, item->parent_index);
		parent_rate = clk_hw_get_rate(parent);
		if (parent_rate % item->rate) {
			item->valid = 0;
		} else {
			item->divisor = parent_rate / item->rate;
			item->valid = 1;
		}
	}
}

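/*
 * Program the mux and/or div fields and trigger the frequency change
 * according to the mix type.  V1 writes the control register directly,
 * V2 sets the FC bit and polls for it to clear, V3 uses a separate
 * clock-select register.
 */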
static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
			unsigned int change_mux, unsigned int change_div)
{
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	u8 width, shift;
	u32 mux_div, fc_req;
	int ret, timeout = 50;
	unsigned long flags = 0;

	if (!change_mux && !change_div)
		return -EINVAL;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (change_div) {
		width = ri->width_div;
		shift = ri->shift_div;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
	}

	if (change_mux) {
		width = ri->width_mux;
		shift = ri->shift_mux;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
	}

	if (mix->type == MMP_CLK_MIX_TYPE_V1) {
		writel(mux_div, ri->reg_clk_ctrl);
	} else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
		mux_div |= (1 << ri->bit_fc);
		writel(mux_div, ri->reg_clk_ctrl);

		do {
			fc_req = readl(ri->reg_clk_ctrl);
			timeout--;
			if (!(fc_req & (1 << ri->bit_fc)))
				break;
		} while (timeout);

		if (timeout == 0) {
			pr_err("%s:%s cannot do frequency change\n",
				__func__, clk_hw_get_name(&mix->hw));
			ret = -EBUSY;
			goto error;
		}
	} else {
		fc_req = readl(ri->reg_clk_ctrl);
		fc_req |= 1 << ri->bit_fc;
		writel(fc_req, ri->reg_clk_ctrl);
		writel(mux_div, ri->reg_clk_sel);
		fc_req &= ~(1 << ri->bit_fc);
	}

	ret = 0;
error:
	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	return ret;
}

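/*
 * Pick the parent and divider whose resulting rate is closest to the
 * requested rate, either from the pre-filtered clock table or by scanning
 * every parent/divider combination.
 */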
static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	struct clk_hw *parent, *parent_best;
	unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
	unsigned long gap, gap_best;
	u32 div_val_max;
	unsigned int div;
	int i, j;

	mix_rate_best = 0;
	parent_rate_best = 0;
	gap_best = ULONG_MAX;
	parent_best = NULL;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_hw_get_parent_by_index(hw,
							item->parent_index);
			parent_rate = clk_hw_get_rate(parent);
			mix_rate = parent_rate / item->divisor;
			gap = abs(mix_rate - req->rate);
			if (!parent_best || gap < gap_best) {
				parent_best = parent;
				parent_rate_best = parent_rate;
				mix_rate_best = mix_rate;
				gap_best = gap;
				if (gap_best == 0)
					goto found;
			}
		}
	} else {
		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
			parent = clk_hw_get_parent_by_index(hw, i);
			parent_rate = clk_hw_get_rate(parent);
			div_val_max = _get_maxdiv(mix);
			for (j = 0; j < div_val_max; j++) {
				div = _get_div(mix, j);
				mix_rate = parent_rate / div;
				gap = abs(mix_rate - req->rate);
				if (!parent_best || gap < gap_best) {
					parent_best = parent;
					parent_rate_best = parent_rate;
					mix_rate_best = mix_rate;
					gap_best = gap;
					if (gap_best == 0)
						goto found;
				}
			}
		}
	}

found:
	if (!parent_best)
		return -EINVAL;

	req->best_parent_rate = parent_rate_best;
	req->best_parent_hw = parent_best;
	req->rate = mix_rate_best;

	return 0;
}

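/*
 * Apply both the divider (derived from parent_rate / rate) and the mux
 * selection in a single register update.
 */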
static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
						unsigned long rate,
						unsigned long parent_rate,
						u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	unsigned int div;
	u32 div_val, mux_val;

	div = parent_rate / rate;
	div_val = _get_div_val(mix, div);
	mux_val = _get_mux_val(mix, index);

	return _set_rate(mix, mux_val, div_val, 1, 1);
}

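/* Read back the mux field and convert it to the current parent index. */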
static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	u32 mux_val;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_mux;
	shift = mix->reg_info.shift_mux;

	mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);

	return _get_mux(mix, mux_val);
}

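/* Compute the output rate from the parent rate and the divider field. */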
static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	unsigned int div;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_div;
	shift = mix->reg_info.shift_div;

	div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));

	return parent_rate / div;
}

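/*
 * Switch to the given parent.  With a clock table, only entries that list
 * this parent are allowed; otherwise the divider is left untouched.
 */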
static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	int i;
	u32 div_val, mux_val;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			if (item->parent_index == index)
				break;
		}
		if (i < mix->table_size) {
			div_val = _get_div_val(mix, item->divisor);
			mux_val = _get_mux_val(mix, item->parent_index);
		} else
			return -EINVAL;
	} else {
		mux_val = _get_mux_val(mix, index);
		div_val = 0;
	}

	return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
}

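/*
 * Set the rate using the parent chosen by determine_rate: find the matching
 * table entry or parent and program mux and divider together.
 */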
static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long best_parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	unsigned long parent_rate;
	unsigned int best_divisor;
	struct clk_hw *parent;
	int i;

	best_divisor = best_parent_rate / rate;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_hw_get_parent_by_index(hw,
							item->parent_index);
			parent_rate = clk_hw_get_rate(parent);
			if (parent_rate == best_parent_rate
				&& item->divisor == best_divisor)
				break;
		}
		if (i < mix->table_size)
			return _set_rate(mix,
					_get_mux_val(mix, item->parent_index),
					_get_div_val(mix, item->divisor),
					1, 1);
		else
			return -EINVAL;
	} else {
		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
			parent = clk_hw_get_parent_by_index(hw, i);
			parent_rate = clk_hw_get_rate(parent);
			if (parent_rate == best_parent_rate)
				break;
		}
		if (i < clk_hw_get_num_parents(hw))
			return _set_rate(mix, _get_mux_val(mix, i),
					_get_div_val(mix, best_divisor), 1, 1);
		else
			return -EINVAL;
	}
}

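/* Pre-compute divisors and validity for the clock table at registration. */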
static int mmp_clk_mix_init(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);

	if (mix->table)
		_filter_clk_table(mix, mix->table, mix->table_size);

	return 0;
}

const struct clk_ops mmp_clk_mix_ops = {
	.determine_rate = mmp_clk_mix_determine_rate,
	.set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
	.set_rate = mmp_clk_set_rate,
	.set_parent = mmp_clk_set_parent,
	.get_parent = mmp_clk_mix_get_parent,
	.recalc_rate = mmp_clk_mix_recalc_rate,
	.init = mmp_clk_mix_init,
};

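/*
 * Register a mix clock.  The register layout in config decides the type:
 * no FC bit (bit_fc >= 32) means V1, a separate clock-select register
 * means V3, otherwise V2.
 */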
struct clk *mmp_clk_register_mix(struct device *dev,
					const char *name,
					const char * const *parent_names,
					u8 num_parents,
					unsigned long flags,
					struct mmp_clk_mix_config *config,
					spinlock_t *lock)
{
	struct mmp_clk_mix *mix;
	struct clk *clk;
	struct clk_init_data init;
	size_t table_bytes;

	mix = kzalloc(sizeof(*mix), GFP_KERNEL);
	if (!mix)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = flags | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.ops = &mmp_clk_mix_ops;

	memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
	if (config->table) {
		table_bytes = sizeof(*config->table) * config->table_size;
		mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
		if (!mix->table)
			goto free_mix;

		mix->table_size = config->table_size;
	}

	if (config->mux_table) {
		table_bytes = sizeof(u32) * num_parents;
		mix->mux_table = kmemdup(config->mux_table, table_bytes,
					 GFP_KERNEL);
		if (!mix->mux_table) {
			kfree(mix->table);
			goto free_mix;
		}
	}

	mix->div_flags = config->div_flags;
	mix->mux_flags = config->mux_flags;
	mix->lock = lock;
	mix->hw.init = &init;

	if (config->reg_info.bit_fc >= 32)
		mix->type = MMP_CLK_MIX_TYPE_V1;
	else if (config->reg_info.reg_clk_sel)
		mix->type = MMP_CLK_MIX_TYPE_V3;
	else
		mix->type = MMP_CLK_MIX_TYPE_V2;
	clk = clk_register(dev, &mix->hw);

	if (IS_ERR(clk)) {
		kfree(mix->mux_table);
		kfree(mix->table);
		kfree(mix);
	}

	return clk;

free_mix:
	kfree(mix);
	return ERR_PTR(-ENOMEM);
}