cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mvneta_bm.c (13347B)


/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"

static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update buf_cookie field of RX descriptor properly,
	 * BM hardware expects buf virtual address to be placed in the
	 * first four bytes of mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);
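
/* Illustrative note, not part of the driver: because mvneta_bm_construct()
 * stores the buffer's virtual address in its first four bytes before DMA
 * mapping, an RX-path consumer can recover that address from the buf_cookie
 * field that the BM hardware fills into the RX descriptor. A minimal sketch,
 * assuming hypothetical rx_desc field names on a 32-bit platform:
 *
 *	u32 cookie = rx_desc->buf_cookie;	// written by BM hardware
 *	u8 *vaddr = (u8 *)cookie;		// virtual address stored above
 *	dma_unmap_single(&priv->pdev->dev, rx_desc->buf_phys_addr,
 *			 bm_pool->buf_size, DMA_FROM_DEVICE);
 */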

/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;
	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that BM pool is being used as a specific type and return
 * the pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		mutex_init(&hwbm_pool->buf_lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
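
/* Minimal usage sketch (an assumption-laden illustration, not code from this
 * file): a NETA port driver would typically claim its long pool roughly like
 * this, where 'pp' stands for the port's private state and pool id 0 is only
 * an example choice:
 *
 *	struct mvneta_bm_pool *long_pool;
 *
 *	long_pool = mvneta_bm_pool_use(pp->bm_priv, 0, MVNETA_BM_LONG,
 *				       pp->id, pp->pkt_size);
 *	if (!long_pool)
 *		return -ENOMEM;
 *	long_pool->port_map |= 1 << pp->id;
 */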

/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work around a problem seen when destroying the pool: a read
		 * access to BPPI may return 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Cleanup pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);

static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size, ALIGN(size,
				 MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}
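
/* The "pool%d,capacity" and "pool%d,pkt-size" properties read above come from
 * the BM node in the device tree. A hedged example fragment (node name, unit
 * address and values are illustrative only):
 *
 *	bm: bm@c8000 {
 *		compatible = "marvell,armada-380-neta-bm";
 *		...
 *		pool2,capacity = <4096>;
 *		pool1,pkt-size = <512>;
 *	};
 */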

static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask all BM interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pools structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}

static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}

static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}

struct mvneta_bm *mvneta_bm_get(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);

	return pdev ? platform_get_drvdata(pdev) : NULL;
}
EXPORT_SYMBOL_GPL(mvneta_bm_get);

void mvneta_bm_put(struct mvneta_bm *priv)
{
	platform_device_put(priv->pdev);
}
EXPORT_SYMBOL_GPL(mvneta_bm_put);
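
/* Hedged consumer sketch (not part of this file): a port driver is expected
 * to resolve the BM node through a phandle and pair mvneta_bm_get() with
 * mvneta_bm_put(), which drops the platform-device reference taken by
 * of_find_device_by_node(). The "buffer-manager" property name is assumed:
 *
 *	struct device_node *bm_node;
 *	struct mvneta_bm *bm_priv;
 *
 *	bm_node = of_parse_phandle(dev->of_node, "buffer-manager", 0);
 *	bm_priv = bm_node ? mvneta_bm_get(bm_node) : NULL;
 *	of_node_put(bm_node);
 *	...
 *	mvneta_bm_put(bm_priv);
 */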

static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i = 0;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");