cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

regcache.c (17775B)


      1// SPDX-License-Identifier: GPL-2.0
      2//
      3// Register cache access API
      4//
      5// Copyright 2011 Wolfson Microelectronics plc
      6//
      7// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
      8
      9#include <linux/bsearch.h>
     10#include <linux/device.h>
     11#include <linux/export.h>
     12#include <linux/slab.h>
     13#include <linux/sort.h>
     14
     15#include "trace.h"
     16#include "internal.h"
     17
     18static const struct regcache_ops *cache_types[] = {
     19	&regcache_rbtree_ops,
     20#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
     21	&regcache_lzo_ops,
     22#endif
     23	&regcache_flat_ops,
     24};
     25
     26static int regcache_hw_init(struct regmap *map)
     27{
     28	int i, j;
     29	int ret;
     30	int count;
     31	unsigned int reg, val;
     32	void *tmp_buf;
     33
     34	if (!map->num_reg_defaults_raw)
     35		return -EINVAL;
     36
     37	/* calculate the size of reg_defaults */
     38	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
     39		if (regmap_readable(map, i * map->reg_stride) &&
     40		    !regmap_volatile(map, i * map->reg_stride))
     41			count++;
     42
     43	/* all registers are unreadable or volatile, so just bypass */
     44	if (!count) {
     45		map->cache_bypass = true;
     46		return 0;
     47	}
     48
     49	map->num_reg_defaults = count;
     50	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
     51					  GFP_KERNEL);
     52	if (!map->reg_defaults)
     53		return -ENOMEM;
     54
     55	if (!map->reg_defaults_raw) {
     56		bool cache_bypass = map->cache_bypass;
     57		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
     58
      59		/* Bypass cache accesses until the data has been read from HW */
     60		map->cache_bypass = true;
     61		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
     62		if (!tmp_buf) {
     63			ret = -ENOMEM;
     64			goto err_free;
     65		}
     66		ret = regmap_raw_read(map, 0, tmp_buf,
     67				      map->cache_size_raw);
     68		map->cache_bypass = cache_bypass;
     69		if (ret == 0) {
     70			map->reg_defaults_raw = tmp_buf;
     71			map->cache_free = true;
     72		} else {
     73			kfree(tmp_buf);
     74		}
     75	}
     76
     77	/* fill the reg_defaults */
     78	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
     79		reg = i * map->reg_stride;
     80
     81		if (!regmap_readable(map, reg))
     82			continue;
     83
     84		if (regmap_volatile(map, reg))
     85			continue;
     86
     87		if (map->reg_defaults_raw) {
     88			val = regcache_get_val(map, map->reg_defaults_raw, i);
     89		} else {
     90			bool cache_bypass = map->cache_bypass;
     91
     92			map->cache_bypass = true;
     93			ret = regmap_read(map, reg, &val);
     94			map->cache_bypass = cache_bypass;
     95			if (ret != 0) {
     96				dev_err(map->dev, "Failed to read %d: %d\n",
     97					reg, ret);
     98				goto err_free;
     99			}
    100		}
    101
    102		map->reg_defaults[j].reg = reg;
    103		map->reg_defaults[j].def = val;
    104		j++;
    105	}
    106
    107	return 0;
    108
    109err_free:
    110	kfree(map->reg_defaults);
    111
    112	return ret;
    113}
    114
    115int regcache_init(struct regmap *map, const struct regmap_config *config)
    116{
    117	int ret;
    118	int i;
    119	void *tmp_buf;
    120
    121	if (map->cache_type == REGCACHE_NONE) {
    122		if (config->reg_defaults || config->num_reg_defaults_raw)
    123			dev_warn(map->dev,
    124				 "No cache used with register defaults set!\n");
    125
    126		map->cache_bypass = true;
    127		return 0;
    128	}
    129
    130	if (config->reg_defaults && !config->num_reg_defaults) {
    131		dev_err(map->dev,
    132			 "Register defaults are set without the number!\n");
    133		return -EINVAL;
    134	}
    135
    136	for (i = 0; i < config->num_reg_defaults; i++)
    137		if (config->reg_defaults[i].reg % map->reg_stride)
    138			return -EINVAL;
    139
    140	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
    141		if (cache_types[i]->type == map->cache_type)
    142			break;
    143
    144	if (i == ARRAY_SIZE(cache_types)) {
     145		dev_err(map->dev, "Could not match cache type: %d\n",
    146			map->cache_type);
    147		return -EINVAL;
    148	}
    149
    150	map->num_reg_defaults = config->num_reg_defaults;
    151	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
    152	map->reg_defaults_raw = config->reg_defaults_raw;
    153	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
    154	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
    155
    156	map->cache = NULL;
    157	map->cache_ops = cache_types[i];
    158
    159	if (!map->cache_ops->read ||
    160	    !map->cache_ops->write ||
    161	    !map->cache_ops->name)
    162		return -EINVAL;
    163
    164	/* We still need to ensure that the reg_defaults
    165	 * won't vanish from under us.  We'll need to make
    166	 * a copy of it.
    167	 */
    168	if (config->reg_defaults) {
    169		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
    170				  sizeof(struct reg_default), GFP_KERNEL);
    171		if (!tmp_buf)
    172			return -ENOMEM;
    173		map->reg_defaults = tmp_buf;
    174	} else if (map->num_reg_defaults_raw) {
     175		/* Some devices such as PMICs don't have cache defaults, so
    176		 * we cope with this by reading back the HW registers and
    177		 * crafting the cache defaults by hand.
    178		 */
    179		ret = regcache_hw_init(map);
    180		if (ret < 0)
    181			return ret;
    182		if (map->cache_bypass)
    183			return 0;
    184	}
    185
    186	if (!map->max_register && map->num_reg_defaults_raw)
     187		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
    188
    189	if (map->cache_ops->init) {
    190		dev_dbg(map->dev, "Initializing %s cache\n",
    191			map->cache_ops->name);
    192		ret = map->cache_ops->init(map);
    193		if (ret)
    194			goto err_free;
    195	}
    196	return 0;
    197
    198err_free:
    199	kfree(map->reg_defaults);
    200	if (map->cache_free)
    201		kfree(map->reg_defaults_raw);
    202
    203	return ret;
    204}
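        /*
         * Illustrative usage sketch: regcache_init() runs from the regmap core
         * when a driver's regmap_config selects a cache type.  A hypothetical
         * driver (the foo_* names below are invented for this example) could
         * provide explicit defaults like this:
         *
         *	static const struct reg_default foo_reg_defaults[] = {
         *		{ .reg = 0x00, .def = 0x0000 },
         *		{ .reg = 0x04, .def = 0x1234 },
         *	};
         *
         *	static const struct regmap_config foo_regmap_config = {
         *		.reg_bits = 8,
         *		.val_bits = 16,
         *		.max_register = 0x40,
         *		.cache_type = REGCACHE_RBTREE,
         *		.reg_defaults = foo_reg_defaults,
         *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
         *	};
         *
         * With .reg_defaults set the table is copied here; if only
         * num_reg_defaults_raw is given, regcache_hw_init() above builds the
         * defaults by reading them back from the hardware instead.
         */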
    205
    206void regcache_exit(struct regmap *map)
    207{
    208	if (map->cache_type == REGCACHE_NONE)
    209		return;
    210
    211	BUG_ON(!map->cache_ops);
    212
    213	kfree(map->reg_defaults);
    214	if (map->cache_free)
    215		kfree(map->reg_defaults_raw);
    216
    217	if (map->cache_ops->exit) {
    218		dev_dbg(map->dev, "Destroying %s cache\n",
    219			map->cache_ops->name);
    220		map->cache_ops->exit(map);
    221	}
    222}
    223
    224/**
    225 * regcache_read - Fetch the value of a given register from the cache.
    226 *
    227 * @map: map to configure.
    228 * @reg: The register index.
    229 * @value: The value to be returned.
    230 *
    231 * Return a negative value on failure, 0 on success.
    232 */
    233int regcache_read(struct regmap *map,
    234		  unsigned int reg, unsigned int *value)
    235{
    236	int ret;
    237
    238	if (map->cache_type == REGCACHE_NONE)
    239		return -ENOSYS;
    240
    241	BUG_ON(!map->cache_ops);
    242
    243	if (!regmap_volatile(map, reg)) {
    244		ret = map->cache_ops->read(map, reg, value);
    245
    246		if (ret == 0)
    247			trace_regmap_reg_read_cache(map, reg, *value);
    248
    249		return ret;
    250	}
    251
    252	return -EINVAL;
    253}
    254
    255/**
    256 * regcache_write - Set the value of a given register in the cache.
    257 *
    258 * @map: map to configure.
    259 * @reg: The register index.
    260 * @value: The new register value.
    261 *
    262 * Return a negative value on failure, 0 on success.
    263 */
    264int regcache_write(struct regmap *map,
    265		   unsigned int reg, unsigned int value)
    266{
    267	if (map->cache_type == REGCACHE_NONE)
    268		return 0;
    269
    270	BUG_ON(!map->cache_ops);
    271
    272	if (!regmap_volatile(map, reg))
    273		return map->cache_ops->write(map, reg, value);
    274
    275	return 0;
    276}
    277
    278static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
    279				    unsigned int val)
    280{
    281	int ret;
    282
    283	/* If we don't know the chip just got reset, then sync everything. */
    284	if (!map->no_sync_defaults)
    285		return true;
    286
    287	/* Is this the hardware default?  If so skip. */
    288	ret = regcache_lookup_reg(map, reg);
    289	if (ret >= 0 && val == map->reg_defaults[ret].def)
    290		return false;
    291	return true;
    292}
    293
    294static int regcache_default_sync(struct regmap *map, unsigned int min,
    295				 unsigned int max)
    296{
    297	unsigned int reg;
    298
    299	for (reg = min; reg <= max; reg += map->reg_stride) {
    300		unsigned int val;
    301		int ret;
    302
    303		if (regmap_volatile(map, reg) ||
    304		    !regmap_writeable(map, reg))
    305			continue;
    306
    307		ret = regcache_read(map, reg, &val);
    308		if (ret)
    309			return ret;
    310
    311		if (!regcache_reg_needs_sync(map, reg, val))
    312			continue;
    313
    314		map->cache_bypass = true;
    315		ret = _regmap_write(map, reg, val);
    316		map->cache_bypass = false;
    317		if (ret) {
    318			dev_err(map->dev, "Unable to sync register %#x. %d\n",
    319				reg, ret);
    320			return ret;
    321		}
    322		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
    323	}
    324
    325	return 0;
    326}
    327
    328/**
    329 * regcache_sync - Sync the register cache with the hardware.
    330 *
    331 * @map: map to configure.
    332 *
    333 * Any registers that should not be synced should be marked as
    334 * volatile.  In general drivers can choose not to use the provided
    335 * syncing functionality if they so require.
    336 *
    337 * Return a negative value on failure, 0 on success.
    338 */
    339int regcache_sync(struct regmap *map)
    340{
    341	int ret = 0;
    342	unsigned int i;
    343	const char *name;
    344	bool bypass;
    345
    346	BUG_ON(!map->cache_ops);
    347
    348	map->lock(map->lock_arg);
    349	/* Remember the initial bypass state */
    350	bypass = map->cache_bypass;
    351	dev_dbg(map->dev, "Syncing %s cache\n",
    352		map->cache_ops->name);
    353	name = map->cache_ops->name;
    354	trace_regcache_sync(map, name, "start");
    355
    356	if (!map->cache_dirty)
    357		goto out;
    358
    359	map->async = true;
    360
    361	/* Apply any patch first */
    362	map->cache_bypass = true;
    363	for (i = 0; i < map->patch_regs; i++) {
    364		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
    365		if (ret != 0) {
    366			dev_err(map->dev, "Failed to write %x = %x: %d\n",
    367				map->patch[i].reg, map->patch[i].def, ret);
    368			goto out;
    369		}
    370	}
    371	map->cache_bypass = false;
    372
    373	if (map->cache_ops->sync)
    374		ret = map->cache_ops->sync(map, 0, map->max_register);
    375	else
    376		ret = regcache_default_sync(map, 0, map->max_register);
    377
    378	if (ret == 0)
    379		map->cache_dirty = false;
    380
    381out:
    382	/* Restore the bypass state */
    383	map->async = false;
    384	map->cache_bypass = bypass;
    385	map->no_sync_defaults = false;
    386	map->unlock(map->lock_arg);
    387
    388	regmap_async_complete(map);
    389
    390	trace_regcache_sync(map, name, "stop");
    391
    392	return ret;
    393}
    394EXPORT_SYMBOL_GPL(regcache_sync);
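        /*
         * Illustrative usage sketch: a hypothetical driver (struct foo_priv,
         * foo_power_off() and foo_power_on() are invented for this example)
         * that cuts power to the device would typically pair regcache_sync()
         * with regcache_cache_only() and regcache_mark_dirty():
         *
         *	static int foo_suspend(struct device *dev)
         *	{
         *		struct foo_priv *foo = dev_get_drvdata(dev);
         *
         *		regcache_cache_only(foo->regmap, true);
         *		regcache_mark_dirty(foo->regmap);
         *		return foo_power_off(foo);
         *	}
         *
         *	static int foo_resume(struct device *dev)
         *	{
         *		struct foo_priv *foo = dev_get_drvdata(dev);
         *		int ret = foo_power_on(foo);
         *
         *		if (ret)
         *			return ret;
         *		regcache_cache_only(foo->regmap, false);
         *		return regcache_sync(foo->regmap);
         *	}
         */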
    395
    396/**
     397 * regcache_sync_region - Sync part of the register cache with the hardware.
    398 *
    399 * @map: map to sync.
    400 * @min: first register to sync
    401 * @max: last register to sync
    402 *
    403 * Write all non-default register values in the specified region to
    404 * the hardware.
    405 *
    406 * Return a negative value on failure, 0 on success.
    407 */
    408int regcache_sync_region(struct regmap *map, unsigned int min,
    409			 unsigned int max)
    410{
    411	int ret = 0;
    412	const char *name;
    413	bool bypass;
    414
    415	BUG_ON(!map->cache_ops);
    416
    417	map->lock(map->lock_arg);
    418
    419	/* Remember the initial bypass state */
    420	bypass = map->cache_bypass;
    421
    422	name = map->cache_ops->name;
    423	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
    424
    425	trace_regcache_sync(map, name, "start region");
    426
    427	if (!map->cache_dirty)
    428		goto out;
    429
    430	map->async = true;
    431
    432	if (map->cache_ops->sync)
    433		ret = map->cache_ops->sync(map, min, max);
    434	else
    435		ret = regcache_default_sync(map, min, max);
    436
    437out:
    438	/* Restore the bypass state */
    439	map->cache_bypass = bypass;
    440	map->async = false;
    441	map->no_sync_defaults = false;
    442	map->unlock(map->lock_arg);
    443
    444	regmap_async_complete(map);
    445
    446	trace_regcache_sync(map, name, "stop region");
    447
    448	return ret;
    449}
    450EXPORT_SYMBOL_GPL(regcache_sync_region);
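        /*
         * Illustrative usage sketch: when only a known block of registers has
         * lost its state, the sync can be restricted to that range instead of
         * replaying the whole cache (FOO_DSP_BASE, FOO_DSP_END and foo->regmap
         * are hypothetical names for this example):
         *
         *	regcache_mark_dirty(foo->regmap);
         *	ret = regcache_sync_region(foo->regmap, FOO_DSP_BASE, FOO_DSP_END);
         *	if (ret)
         *		dev_err(dev, "failed to restore DSP block: %d\n", ret);
         */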
    451
    452/**
    453 * regcache_drop_region - Discard part of the register cache
    454 *
    455 * @map: map to operate on
    456 * @min: first register to discard
    457 * @max: last register to discard
    458 *
    459 * Discard part of the register cache.
    460 *
    461 * Return a negative value on failure, 0 on success.
    462 */
    463int regcache_drop_region(struct regmap *map, unsigned int min,
    464			 unsigned int max)
    465{
    466	int ret = 0;
    467
    468	if (!map->cache_ops || !map->cache_ops->drop)
    469		return -EINVAL;
    470
    471	map->lock(map->lock_arg);
    472
    473	trace_regcache_drop_region(map, min, max);
    474
    475	ret = map->cache_ops->drop(map, min, max);
    476
    477	map->unlock(map->lock_arg);
    478
    479	return ret;
    480}
    481EXPORT_SYMBOL_GPL(regcache_drop_region);
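        /*
         * Illustrative usage sketch: if something outside regmap's control
         * rewrites a window of registers (for example a firmware download),
         * the stale cached values for that window can be discarded so they
         * are neither synced nor served from the cache later (the FOO_*
         * bounds and foo->regmap are hypothetical):
         *
         *	ret = regcache_drop_region(foo->regmap, FOO_FW_BASE, FOO_FW_END);
         *	if (ret)
         *		dev_warn(dev, "failed to drop cached window: %d\n", ret);
         *
         * Note this requires a cache implementation that provides a ->drop
         * callback, e.g. the rbtree cache.
         */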
    482
    483/**
    484 * regcache_cache_only - Put a register map into cache only mode
    485 *
    486 * @map: map to configure
    487 * @enable: flag if changes should be written to the hardware
    488 *
     489 * When a register map is marked as cache only, writes to the register
     490 * map API will only update the register cache; they will not cause
     491 * any hardware changes.  This is useful for allowing portions of
    492 * drivers to act as though the device were functioning as normal when
    493 * it is disabled for power saving reasons.
    494 */
    495void regcache_cache_only(struct regmap *map, bool enable)
    496{
    497	map->lock(map->lock_arg);
    498	WARN_ON(map->cache_bypass && enable);
    499	map->cache_only = enable;
    500	trace_regmap_cache_only(map, enable);
    501	map->unlock(map->lock_arg);
    502}
    503EXPORT_SYMBOL_GPL(regcache_cache_only);
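        /*
         * Illustrative usage sketch: with cache-only mode enabled,
         * configuration written while the device is unpowered is retained in
         * the cache and can be replayed later (FOO_GAIN_REG and foo->regmap
         * are hypothetical names):
         *
         *	regcache_cache_only(foo->regmap, true);
         *	regmap_write(foo->regmap, FOO_GAIN_REG, 0x2a);	// cached, no bus I/O
         *	...
         *	regcache_cache_only(foo->regmap, false);
         *	regcache_sync(foo->regmap);			// now reaches the HW
         */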
    504
    505/**
    506 * regcache_mark_dirty - Indicate that HW registers were reset to default values
    507 *
    508 * @map: map to mark
    509 *
    510 * Inform regcache that the device has been powered down or reset, so that
    511 * on resume, regcache_sync() knows to write out all non-default values
    512 * stored in the cache.
    513 *
    514 * If this function is not called, regcache_sync() will assume that
    515 * the hardware state still matches the cache state, modulo any writes that
    516 * happened when cache_only was true.
    517 */
    518void regcache_mark_dirty(struct regmap *map)
    519{
    520	map->lock(map->lock_arg);
    521	map->cache_dirty = true;
    522	map->no_sync_defaults = true;
    523	map->unlock(map->lock_arg);
    524}
    525EXPORT_SYMBOL_GPL(regcache_mark_dirty);
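        /*
         * Illustrative usage sketch: ordering matters on the power-down path.
         * Marking the cache dirty once the supply is cut records that the
         * hardware is back at its reset defaults, so regcache_reg_needs_sync()
         * can skip registers whose cached value equals the default
         * (foo_power_off(), foo_power_on() and foo->regmap are hypothetical):
         *
         *	foo_power_off(foo);
         *	regcache_mark_dirty(foo->regmap);	// HW now holds reset defaults
         *	...
         *	foo_power_on(foo);
         *	regcache_sync(foo->regmap);		// writes only non-default values
         *
         * When regcache_mark_dirty() has not been called,
         * regcache_reg_needs_sync() returns true for every register, since the
         * hardware is not known to be at its defaults.
         */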
    526
    527/**
    528 * regcache_cache_bypass - Put a register map into cache bypass mode
    529 *
    530 * @map: map to configure
    531 * @enable: flag if changes should not be written to the cache
    532 *
    533 * When a register map is marked with the cache bypass option, writes
     534 * to the register map API will only update the hardware and not
    535 * the cache directly.  This is useful when syncing the cache back to
    536 * the hardware.
    537 */
    538void regcache_cache_bypass(struct regmap *map, bool enable)
    539{
    540	map->lock(map->lock_arg);
    541	WARN_ON(map->cache_only && enable);
    542	map->cache_bypass = enable;
    543	trace_regmap_cache_bypass(map, enable);
    544	map->unlock(map->lock_arg);
    545}
    546EXPORT_SYMBOL_GPL(regcache_cache_bypass);
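        /*
         * Illustrative usage sketch: bypass mode is the mirror image of
         * cache-only mode; writes go straight to the hardware and leave the
         * cache untouched, e.g. for a transient test sequence that should not
         * be replayed by a later regcache_sync() (FOO_TEST_REG and foo->regmap
         * are hypothetical):
         *
         *	regcache_cache_bypass(foo->regmap, true);
         *	regmap_write(foo->regmap, FOO_TEST_REG, 0x1);	// HW only, not cached
         *	regmap_write(foo->regmap, FOO_TEST_REG, 0x0);
         *	regcache_cache_bypass(foo->regmap, false);
         *
         * As the WARN_ON() above indicates, bypass and cache-only modes must
         * not be enabled at the same time.
         */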
    547
    548bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
    549		      unsigned int val)
    550{
    551	if (regcache_get_val(map, base, idx) == val)
    552		return true;
    553
    554	/* Use device native format if possible */
    555	if (map->format.format_val) {
    556		map->format.format_val(base + (map->cache_word_size * idx),
    557				       val, 0);
    558		return false;
    559	}
    560
    561	switch (map->cache_word_size) {
    562	case 1: {
    563		u8 *cache = base;
    564
    565		cache[idx] = val;
    566		break;
    567	}
    568	case 2: {
    569		u16 *cache = base;
    570
    571		cache[idx] = val;
    572		break;
    573	}
    574	case 4: {
    575		u32 *cache = base;
    576
    577		cache[idx] = val;
    578		break;
    579	}
    580#ifdef CONFIG_64BIT
    581	case 8: {
    582		u64 *cache = base;
    583
    584		cache[idx] = val;
    585		break;
    586	}
    587#endif
    588	default:
    589		BUG();
    590	}
    591	return false;
    592}
    593
    594unsigned int regcache_get_val(struct regmap *map, const void *base,
    595			      unsigned int idx)
    596{
    597	if (!base)
    598		return -EINVAL;
    599
    600	/* Use device native format if possible */
    601	if (map->format.parse_val)
    602		return map->format.parse_val(regcache_get_val_addr(map, base,
    603								   idx));
    604
    605	switch (map->cache_word_size) {
    606	case 1: {
    607		const u8 *cache = base;
    608
    609		return cache[idx];
    610	}
    611	case 2: {
    612		const u16 *cache = base;
    613
    614		return cache[idx];
    615	}
    616	case 4: {
    617		const u32 *cache = base;
    618
    619		return cache[idx];
    620	}
    621#ifdef CONFIG_64BIT
    622	case 8: {
    623		const u64 *cache = base;
    624
    625		return cache[idx];
    626	}
    627#endif
    628	default:
    629		BUG();
    630	}
    631	/* unreachable */
    632	return -1;
    633}
    634
    635static int regcache_default_cmp(const void *a, const void *b)
    636{
    637	const struct reg_default *_a = a;
    638	const struct reg_default *_b = b;
    639
    640	return _a->reg - _b->reg;
    641}
    642
    643int regcache_lookup_reg(struct regmap *map, unsigned int reg)
    644{
    645	struct reg_default key;
    646	struct reg_default *r;
    647
    648	key.reg = reg;
    649	key.def = 0;
    650
    651	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
    652		    sizeof(struct reg_default), regcache_default_cmp);
    653
    654	if (r)
    655		return r - map->reg_defaults;
    656	else
    657		return -ENOENT;
    658}
    659
    660static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
    661{
    662	if (!cache_present)
    663		return true;
    664
    665	return test_bit(idx, cache_present);
    666}
    667
    668static int regcache_sync_block_single(struct regmap *map, void *block,
    669				      unsigned long *cache_present,
    670				      unsigned int block_base,
    671				      unsigned int start, unsigned int end)
    672{
    673	unsigned int i, regtmp, val;
    674	int ret;
    675
    676	for (i = start; i < end; i++) {
    677		regtmp = block_base + (i * map->reg_stride);
    678
    679		if (!regcache_reg_present(cache_present, i) ||
    680		    !regmap_writeable(map, regtmp))
    681			continue;
    682
    683		val = regcache_get_val(map, block, i);
    684		if (!regcache_reg_needs_sync(map, regtmp, val))
    685			continue;
    686
    687		map->cache_bypass = true;
    688
    689		ret = _regmap_write(map, regtmp, val);
    690
    691		map->cache_bypass = false;
    692		if (ret != 0) {
    693			dev_err(map->dev, "Unable to sync register %#x. %d\n",
    694				regtmp, ret);
    695			return ret;
    696		}
    697		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
    698			regtmp, val);
    699	}
    700
    701	return 0;
    702}
    703
    704static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
    705					 unsigned int base, unsigned int cur)
    706{
    707	size_t val_bytes = map->format.val_bytes;
    708	int ret, count;
    709
    710	if (*data == NULL)
    711		return 0;
    712
    713	count = (cur - base) / map->reg_stride;
    714
    715	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
    716		count * val_bytes, count, base, cur - map->reg_stride);
    717
    718	map->cache_bypass = true;
    719
    720	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
    721	if (ret)
    722		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
    723			base, cur - map->reg_stride, ret);
    724
    725	map->cache_bypass = false;
    726
    727	*data = NULL;
    728
    729	return ret;
    730}
    731
    732static int regcache_sync_block_raw(struct regmap *map, void *block,
    733			    unsigned long *cache_present,
    734			    unsigned int block_base, unsigned int start,
    735			    unsigned int end)
    736{
    737	unsigned int i, val;
    738	unsigned int regtmp = 0;
    739	unsigned int base = 0;
    740	const void *data = NULL;
    741	int ret;
    742
    743	for (i = start; i < end; i++) {
    744		regtmp = block_base + (i * map->reg_stride);
    745
    746		if (!regcache_reg_present(cache_present, i) ||
    747		    !regmap_writeable(map, regtmp)) {
    748			ret = regcache_sync_block_raw_flush(map, &data,
    749							    base, regtmp);
    750			if (ret != 0)
    751				return ret;
    752			continue;
    753		}
    754
    755		val = regcache_get_val(map, block, i);
    756		if (!regcache_reg_needs_sync(map, regtmp, val)) {
    757			ret = regcache_sync_block_raw_flush(map, &data,
    758							    base, regtmp);
    759			if (ret != 0)
    760				return ret;
    761			continue;
    762		}
    763
    764		if (!data) {
    765			data = regcache_get_val_addr(map, block, i);
    766			base = regtmp;
    767		}
    768	}
    769
    770	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
    771			map->reg_stride);
    772}
    773
    774int regcache_sync_block(struct regmap *map, void *block,
    775			unsigned long *cache_present,
    776			unsigned int block_base, unsigned int start,
    777			unsigned int end)
    778{
    779	if (regmap_can_raw_write(map) && !map->use_single_write)
    780		return regcache_sync_block_raw(map, block, cache_present,
    781					       block_base, start, end);
    782	else
    783		return regcache_sync_block_single(map, block, cache_present,
    784						  block_base, start, end);
    785}