cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

bman_ccsr.c (9168B)


      1/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
      2 *
      3 * Redistribution and use in source and binary forms, with or without
      4 * modification, are permitted provided that the following conditions are met:
      5 *     * Redistributions of source code must retain the above copyright
      6 *	 notice, this list of conditions and the following disclaimer.
      7 *     * Redistributions in binary form must reproduce the above copyright
      8 *	 notice, this list of conditions and the following disclaimer in the
      9 *	 documentation and/or other materials provided with the distribution.
     10 *     * Neither the name of Freescale Semiconductor nor the
     11 *	 names of its contributors may be used to endorse or promote products
     12 *	 derived from this software without specific prior written permission.
     13 *
     14 * ALTERNATIVELY, this software may be distributed under the terms of the
     15 * GNU General Public License ("GPL") as published by the Free Software
     16 * Foundation, either version 2 of that License or (at your option) any
     17 * later version.
     18 *
     19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
     20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
     23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
     26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29 */
     30
     31#include "bman_priv.h"
     32
/* BMan IP block revision; set during probe from REG_IP_REV_1 */
u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);
     35
     36/* Register offsets */
     37#define REG_FBPR_FPC		0x0800
     38#define REG_ECSR		0x0a00
     39#define REG_ECIR		0x0a04
     40#define REG_EADR		0x0a08
     41#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
     42#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
     43#define REG_IP_REV_1		0x0bf8
     44#define REG_IP_REV_2		0x0bfc
     45#define REG_FBPR_BARE		0x0c00
     46#define REG_FBPR_BAR		0x0c04
     47#define REG_FBPR_AR		0x0c10
     48#define REG_SRCIDR		0x0d04
     49#define REG_LIODNR		0x0d08
     50#define REG_ERR_ISR		0x0e00
     51#define REG_ERR_IER		0x0e04
     52#define REG_ERR_ISDR		0x0e08
     53
     54/* Used by all error interrupt registers except 'inhibit' */
     55#define BM_EIRQ_IVCI	0x00000010	/* Invalid Command Verb */
     56#define BM_EIRQ_FLWI	0x00000008	/* FBPR Low Watermark */
     57#define BM_EIRQ_MBEI	0x00000004	/* Multi-bit ECC Error */
     58#define BM_EIRQ_SBEI	0x00000002	/* Single-bit ECC Error */
     59#define BM_EIRQ_BSCN	0x00000001	/* pool State Change Notification */
     60
/* Maps one error-interrupt bit to a human-readable description */
struct bman_hwerr_txt {
	u32 mask;		/* one of the BM_EIRQ_* bits */
	const char *txt;	/* message logged by bman_isr() */
};

/* Table consulted by the error ISR to report each asserted error source */
static const struct bman_hwerr_txt bman_hwerr_txts[] = {
	{ BM_EIRQ_IVCI, "Invalid Command Verb" },
	{ BM_EIRQ_FLWI, "FBPR Low Watermark" },
	{ BM_EIRQ_MBEI, "Multi-bit ECC Error" },
	{ BM_EIRQ_SBEI, "Single-bit ECC Error" },
	{ BM_EIRQ_BSCN, "Pool State Change Notification" },
};
     73
/* Trigger the low water mark interrupt only once */
     75#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
     76
     77/* Pointer to the start of the BMan's CCSR space */
     78static u32 __iomem *bm_ccsr_start;
     79
     80static inline u32 bm_ccsr_in(u32 offset)
     81{
     82	return ioread32be(bm_ccsr_start + offset/4);
     83}
     84static inline void bm_ccsr_out(u32 offset, u32 val)
     85{
     86	iowrite32be(val, bm_ccsr_start + offset/4);
     87}
     88
     89static void bm_get_version(u16 *id, u8 *major, u8 *minor)
     90{
     91	u32 v = bm_ccsr_in(REG_IP_REV_1);
     92	*id = (v >> 16);
     93	*major = (v >> 8) & 0xff;
     94	*minor = v & 0xff;
     95}
     96
     97/* signal transactions for FBPRs with higher priority */
     98#define FBPR_AR_RPRIO_HI BIT(30)
     99
    100/* Track if probe has occurred and if cleanup is required */
    101static int __bman_probed;
    102static int __bman_requires_cleanup;
    103
    104
/*
 * Program the FBPR private-memory base address and size into BMan.
 *
 * @ba:   physical base address of the FBPR region
 * @size: region size in bytes; must be a power of 2 in [4KiB, 1GiB],
 *        and @ba must be aligned to @size
 *
 * Returns 0 after programming the registers, 1 if BMan was already
 * programmed with the same BAR (caller must perform cleanup later,
 * see __bman_requires_cleanup), or -ENOMEM if BMan was already
 * programmed with a *different* BAR.
 */
static int bm_set_memory(u64 ba, u32 size)
{
	u32 bar, bare;
	u32 exp = ilog2(size);
	/* choke if size isn't within range */
	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
		   is_power_of_2(size));
	/* choke if '[e]ba' has lower-alignment than 'size' */
	DPAA_ASSERT(!(ba & (size - 1)));

	/* Check to see if BMan has already been initialized */
	bar = bm_ccsr_in(REG_FBPR_BAR);
	if (bar) {
		/* Make sure ba == what was programmed */
		bare = bm_ccsr_in(REG_FBPR_BARE);
		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
			pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
			       ba, bare, bar);
			return -ENOMEM;
		}
		pr_info("BMan BAR already configured\n");
		__bman_requires_cleanup = 1;
		return 1;
	}

	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
	/* AR encodes the region size as log2(size) - 1 */
	bm_ccsr_out(REG_FBPR_AR, exp - 1);
	return 0;
}
    135
    136/*
    137 * Location and size of BMan private memory
    138 *
    139 * Ideally we would use the DMA API to turn rmem->base into a DMA address
    140 * (especially if iommu translations ever get involved).  Unfortunately, the
    141 * DMA API currently does not allow mapping anything that is not backed with
    142 * a struct page.
    143 */
    144static dma_addr_t fbpr_a;
    145static size_t fbpr_sz;
    146
    147static int bman_fbpr(struct reserved_mem *rmem)
    148{
    149	fbpr_a = rmem->base;
    150	fbpr_sz = rmem->size;
    151
    152	WARN_ON(!(fbpr_a && fbpr_sz));
    153
    154	return 0;
    155}
    156RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
    157
    158static irqreturn_t bman_isr(int irq, void *ptr)
    159{
    160	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
    161	struct device *dev = ptr;
    162
    163	ier_val = bm_ccsr_in(REG_ERR_IER);
    164	isr_val = bm_ccsr_in(REG_ERR_ISR);
    165	ecsr_val = bm_ccsr_in(REG_ECSR);
    166	isr_mask = isr_val & ier_val;
    167
    168	if (!isr_mask)
    169		return IRQ_NONE;
    170
    171	for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
    172		if (bman_hwerr_txts[i].mask & isr_mask) {
    173			dev_err_ratelimited(dev, "ErrInt: %s\n",
    174					    bman_hwerr_txts[i].txt);
    175			if (bman_hwerr_txts[i].mask & ecsr_val) {
    176				/* Re-arm error capture registers */
    177				bm_ccsr_out(REG_ECSR, ecsr_val);
    178			}
    179			if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
    180				dev_dbg(dev, "Disabling error 0x%x\n",
    181					bman_hwerr_txts[i].mask);
    182				ier_val &= ~bman_hwerr_txts[i].mask;
    183				bm_ccsr_out(REG_ERR_IER, ier_val);
    184			}
    185		}
    186	}
    187	bm_ccsr_out(REG_ERR_ISR, isr_val);
    188
    189	return IRQ_HANDLED;
    190}
    191
    192int bman_is_probed(void)
    193{
    194	return __bman_probed;
    195}
    196EXPORT_SYMBOL_GPL(bman_is_probed);
    197
    198int bman_requires_cleanup(void)
    199{
    200	return __bman_requires_cleanup;
    201}
    202
    203void bman_done_cleanup(void)
    204{
    205	__bman_requires_cleanup = 0;
    206}
    207
    208static int fsl_bman_probe(struct platform_device *pdev)
    209{
    210	int ret, err_irq;
    211	struct device *dev = &pdev->dev;
    212	struct device_node *node = dev->of_node;
    213	struct resource *res;
    214	u16 id, bm_pool_cnt;
    215	u8 major, minor;
    216
    217	__bman_probed = -1;
    218
    219	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    220	if (!res) {
    221		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
    222			node);
    223		return -ENXIO;
    224	}
    225	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
    226	if (!bm_ccsr_start)
    227		return -ENXIO;
    228
    229	bm_get_version(&id, &major, &minor);
    230	if (major == 1 && minor == 0) {
    231		bman_ip_rev = BMAN_REV10;
    232		bm_pool_cnt = BM_POOL_MAX;
    233	} else if (major == 2 && minor == 0) {
    234		bman_ip_rev = BMAN_REV20;
    235		bm_pool_cnt = 8;
    236	} else if (major == 2 && minor == 1) {
    237		bman_ip_rev = BMAN_REV21;
    238		bm_pool_cnt = BM_POOL_MAX;
    239	} else {
    240		dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
    241			id, major, minor);
    242		return -ENODEV;
    243	}
    244
    245	/*
    246	 * If FBPR memory wasn't defined using the qbman compatible string
    247	 * try using the of_reserved_mem_device method
    248	 */
    249	if (!fbpr_a) {
    250		ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
    251		if (ret) {
    252			dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
    253				ret);
    254			return -ENODEV;
    255		}
    256	}
    257
    258	dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
    259
    260	bm_set_memory(fbpr_a, fbpr_sz);
    261
    262	err_irq = platform_get_irq(pdev, 0);
    263	if (err_irq <= 0) {
    264		dev_info(dev, "Can't get %pOF IRQ\n", node);
    265		return -ENODEV;
    266	}
    267	ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
    268			       dev);
    269	if (ret)  {
    270		dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
    271			ret, node);
    272		return ret;
    273	}
    274	/* Disable Buffer Pool State Change */
    275	bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
    276	/*
    277	 * Write-to-clear any stale bits, (eg. starvation being asserted prior
    278	 * to resource allocation during driver init).
    279	 */
    280	bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
    281	/* Enable Error Interrupts */
    282	bm_ccsr_out(REG_ERR_IER, 0xffffffff);
    283
    284	bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
    285	if (IS_ERR(bm_bpalloc)) {
    286		ret = PTR_ERR(bm_bpalloc);
    287		dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
    288		return ret;
    289	}
    290
    291	/* seed BMan resource pool */
    292	ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
    293	if (ret) {
    294		dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
    295			0, bm_pool_cnt - 1, ret);
    296		return ret;
    297	}
    298
    299	__bman_probed = 1;
    300
    301	return 0;
    302};
    303
/* Device-tree match table: binds this driver to "fsl,bman" nodes */
static const struct of_device_id fsl_bman_ids[] = {
	{
		.compatible = "fsl,bman",
	},
	{}
};
    310
/*
 * Built-in platform driver; no .remove, and bind/unbind via sysfs is
 * suppressed since BMan state cannot be safely torn down at runtime.
 */
static struct platform_driver fsl_bman_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = fsl_bman_ids,
		.suppress_bind_attrs = true,
	},
	.probe = fsl_bman_probe,
};

builtin_platform_driver(fsl_bman_driver);