cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ixp4xx-qmgr.c (11925B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>

static struct qmgr_regs __iomem *qmgr_regs;
static int qmgr_irq_1;
static int qmgr_irq_2;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

/**
 * qmgr_put_entry() - pushes an entry onto a hardware queue
 * @queue:	queue number
 * @val:	value to push
 */
void qmgr_put_entry(unsigned int queue, u32 val)
{
#if DEBUG_QMGR
	BUG_ON(!qmgr_queue_descs[queue][0]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) put %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	__raw_writel(val, &qmgr_regs->acc[queue][0]);
}

/**
 * qmgr_get_entry() - pops an entry from a hardware queue
 * @queue:	queue number
 *
 * Returns the popped entry, or 0 if the queue is empty.
 */
u32 qmgr_get_entry(unsigned int queue)
{
	u32 val;
	val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
	BUG_ON(!qmgr_queue_descs[queue][0]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) get %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	return val;
}

static int __qmgr_get_stat1(unsigned int queue)
{
	/* stat1 packs 4 status flags per queue, 8 queues per register */
	return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
		>> ((queue & 7) << 2)) & 0xF;
}

static int __qmgr_get_stat2(unsigned int queue)
{
	/* stat2 packs 2 status flags per queue, 16 queues per register */
	BUG_ON(queue >= HALF_QUEUES);
	return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
		>> ((queue & 0xF) << 1)) & 0x3;
}

/**
 * qmgr_stat_empty() - checks if a hardware queue is empty
 * @queue:	queue number
 *
 * Returns non-zero value if the queue is empty.
 */
int qmgr_stat_empty(unsigned int queue)
{
	BUG_ON(queue >= HALF_QUEUES);
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
}

/**
 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
 * @queue:	queue number
 *
 * Returns non-zero value if the queue is below low watermark.
 */
int qmgr_stat_below_low_watermark(unsigned int queue)
{
	if (queue >= HALF_QUEUES)
		return (__raw_readl(&qmgr_regs->statne_h) >>
			(queue - HALF_QUEUES)) & 0x01;
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
}

/**
 * qmgr_stat_full() - checks if a hardware queue is full
 * @queue:	queue number
 *
 * Returns non-zero value if the queue is full.
 */
int qmgr_stat_full(unsigned int queue)
{
	if (queue >= HALF_QUEUES)
		return (__raw_readl(&qmgr_regs->statf_h) >>
			(queue - HALF_QUEUES)) & 0x01;
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
}

/**
 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
 * @queue:	queue number
 *
 * Returns non-zero value if the queue experienced overflow.
 */
int qmgr_stat_overflow(unsigned int queue)
{
	return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
}
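
/*
 * A minimal usage sketch (not part of the original driver): draining a
 * queue by polling the accessors above. Queue number 7 and the
 * drain_queue() helper are hypothetical, for illustration only.
 * qmgr_get_entry() reads 0 once the hardware queue is empty, so
 * qmgr_stat_empty() serves as the explicit loop condition here.
 */
static void drain_queue(void)
{
	u32 entry;

	while (!qmgr_stat_empty(7)) {
		entry = qmgr_get_entry(7); /* pops one dword */
		pr_debug("queue 7 yielded 0x%08X\n", entry);
	}
}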

/**
 * qmgr_set_irq() - sets the IRQ trigger condition and handler for a queue
 * @queue:	queue number
 * @src:	one of the QUEUE_IRQ_SRC_* conditions
 * @handler:	function called when the condition is met
 * @pdev:	opaque pointer passed to the handler
 *
 * For queues 32-63 the hardware only supports the "not nearly empty"
 * condition.
 */
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}
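
/*
 * A minimal usage sketch (not part of the original driver): a consumer
 * normally pairs qmgr_set_irq() with qmgr_enable_irq(). Queue number 3,
 * rx_irq_handler() and wire_up_queue3() are hypothetical; the
 * QUEUE_IRQ_SRC_NOT_EMPTY condition comes from <linux/soc/ixp4xx/qmgr.h>.
 */
static void rx_irq_handler(void *pdev)
{
	/* consume entries with qmgr_get_entry() here */
}

static void wire_up_queue3(void *pdev)
{
	qmgr_set_irq(3, QUEUE_IRQ_SRC_NOT_EMPTY, rx_irq_handler, pdev);
	qmgr_enable_irq(3);
}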


static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
		stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}


static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
		     __raw_readl(&qmgr_regs->statne_h);
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}


static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == qmgr_irq_1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}


void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

/* shifts the 128-bit SRAM allocation mask left by one 16-dword page */
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char *name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	/*
	 * SRAM configuration word: ring size at bits 24-25, nearly-empty
	 * watermark at bits 26-28, nearly-full watermark at bits 29-31,
	 * base address (in 16-dword pages) at bits 14-21.
	 */
	switch (len) {
	case  16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case  32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case  64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for queue %i\n",
			       queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

/**
 * qmgr_release_queue() - frees a queue's SRAM space and IRQ handler
 * @queue:	queue number
 *
 * Drains any entries still present (complaining about each one), then
 * returns the queue's SRAM pages to the allocation bitmap.
 */
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr);		/* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	/* rebuild the allocation mask for this queue's SRAM pages */
	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	while ((addr = qmgr_get_entry(queue))) /* drain */
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}
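
/*
 * A minimal usage sketch (not part of the original driver): the expected
 * lifecycle is request, use, release. Queue 7, the 64-dword ring size and
 * use_queue7() are hypothetical. With DEBUG_QMGR off, the
 * qmgr_request_queue() macro from <linux/soc/ixp4xx/qmgr.h> drops the two
 * description arguments and calls __qmgr_request_queue().
 */
static int use_queue7(void)
{
	int err;

	err = qmgr_request_queue(7, 64, 0, 0, "example %s", "q7");
	if (err)
		return err; /* -EINVAL, -EBUSY or -ENOMEM */

	qmgr_put_entry(7, 0x12345678); /* producer side */
	(void)qmgr_get_entry(7);       /* consumer side */

	qmgr_release_queue(7); /* drains and complains if entries remain */
	return 0;
}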

static int ixp4xx_qmgr_probe(struct platform_device *pdev)
{
	int i, err;
	irq_handler_t handler1, handler2;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int irq1, irq2;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	qmgr_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(qmgr_regs))
		return PTR_ERR(qmgr_regs);

	irq1 = platform_get_irq(pdev, 0);
	if (irq1 <= 0)
		return irq1 ? irq1 : -EINVAL;
	qmgr_irq_1 = irq1;
	irq2 = platform_get_irq(pdev, 1);
	if (irq2 <= 0)
		return irq2 ? irq2 : -EINVAL;
	qmgr_irq_2 = irq2;

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
			       NULL);
	if (err) {
		dev_err(dev, "failed to request IRQ%i (%i)\n",
			irq1, err);
		return err;
	}

	err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
			       NULL);
	if (err) {
		dev_err(dev, "failed to request IRQ%i (%i)\n",
			irq2, err);
		return err;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	dev_info(dev, "IXP4xx Queue Manager initialized.\n");
	return 0;
}

static int ixp4xx_qmgr_remove(struct platform_device *pdev)
{
	synchronize_irq(qmgr_irq_1);
	synchronize_irq(qmgr_irq_2);
	return 0;
}

static const struct of_device_id ixp4xx_qmgr_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ahb-queue-manager",
	},
	{},
};

static struct platform_driver ixp4xx_qmgr_driver = {
	.driver = {
		.name           = "ixp4xx-qmgr",
		.of_match_table = ixp4xx_qmgr_of_match,
	},
	.probe = ixp4xx_qmgr_probe,
	.remove = ixp4xx_qmgr_remove,
};
module_platform_driver(ixp4xx_qmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_put_entry);
EXPORT_SYMBOL(qmgr_get_entry);
EXPORT_SYMBOL(qmgr_stat_empty);
EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
EXPORT_SYMBOL(qmgr_stat_full);
EXPORT_SYMBOL(qmgr_stat_overflow);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);