cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq-qcom-mpm.c (12260B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Linaro Limited
 * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>

/*
 * This is the driver for the Qualcomm MPM (MSM Power Manager) interrupt
 * controller, which is commonly found on Qualcomm SoCs built on the RPM
 * architecture.  Sitting in the always-on domain, the MPM monitors wakeup
 * interrupts while the SoC is asleep and wakes up the AP when one of those
 * interrupts occurs.  This driver doesn't access the physical MPM registers
 * directly, though.  Instead, the access is bridged via a piece of internal
 * memory (SRAM) that is accessible to both the AP and RPM.  This piece of
 * memory is called 'vMPM' in the driver.
 *
 * While the SoC is awake, the vMPM is owned by the AP and all register setup
 * done by this driver happens on vMPM.  When the AP is about to be power
 * collapsed, the driver sends a mailbox notification to RPM, which takes over
 * vMPM ownership and dumps vMPM into the physical MPM registers.  On wakeup,
 * the AP is woken up by an MPM pin/interrupt, and RPM copies the STATUS
 * registers into vMPM.  The AP then owns vMPM again.
 *
 * vMPM register map:
 *
 *    31                              0
 *    +--------------------------------+
 *    |            TIMER0              | 0x00
 *    +--------------------------------+
 *    |            TIMER1              | 0x04
 *    +--------------------------------+
 *    |            ENABLE0             | 0x08
 *    +--------------------------------+
 *    |              ...               | ...
 *    +--------------------------------+
 *    |            ENABLEn             |
 *    +--------------------------------+
 *    |          FALLING_EDGE0         |
 *    +--------------------------------+
 *    |              ...               |
 *    +--------------------------------+
 *    |            STATUSn             |
 *    +--------------------------------+
 *
 *    n = DIV_ROUND_UP(pin_cnt, 32)
 *
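 * For example (illustrative numbers, not taken from this file): with
 * pin_cnt = 96, n = reg_stride = 3, so ENABLE0..ENABLE2 occupy words 2..4
 * (offsets 0x08..0x10), FALLING_EDGE0..2 words 5..7, RISING_EDGE0..2 words
 * 8..10, POLARITY0..2 words 11..13 and STATUS0..2 words 14..16.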
 */

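/* vMPM register bank indices, in the order the banks follow TIMER0/TIMER1 */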
#define MPM_REG_ENABLE		0
#define MPM_REG_FALLING_EDGE	1
#define MPM_REG_RISING_EDGE	2
#define MPM_REG_POLARITY	3
#define MPM_REG_STATUS		4

/* MPM pin map to GIC hwirq */
struct mpm_gic_map {
	int pin;
	irq_hw_number_t hwirq;
};

struct qcom_mpm_priv {
	void __iomem *base;
	raw_spinlock_t lock;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;
	struct mpm_gic_map *maps;
	unsigned int map_cnt;
	unsigned int reg_stride;
	struct irq_domain *domain;
	struct generic_pm_domain genpd;
};

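/*
 * vMPM accessors.  The first two 32-bit words of vMPM hold TIMER0 and TIMER1,
 * hence the "+ 2" in the offset calculation; each register bank that follows
 * is reg_stride words wide, one bit per MPM pin.
 */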
static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
			 unsigned int index)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	return readl_relaxed(priv->base + offset);
}

static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
			   unsigned int index, u32 val)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	writel_relaxed(val, priv->base + offset);

	/* Ensure the write is completed */
	wmb();
}

static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
	__assign_bit(shift, &val, en);
	qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static void qcom_mpm_mask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, false);

	if (d->parent_data)
		irq_chip_mask_parent(d);
}

static void qcom_mpm_unmask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, true);

	if (d->parent_data)
		irq_chip_unmask_parent(d);
}

static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
			 unsigned int index, unsigned int shift)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, reg, index);
	__assign_bit(shift, &val, set);
	qcom_mpm_write(priv, reg, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;

	if (type & IRQ_TYPE_EDGE_RISING)
		mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);

	if (type & IRQ_TYPE_EDGE_FALLING)
		mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);

	if (type & IRQ_TYPE_LEVEL_HIGH)
		mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);

	if (!d->parent_data)
		return 0;

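	/*
	 * MPM has latched the exact trigger above; the parent GIC only
	 * accepts rising-edge or high-level for these lines, so fold the
	 * type into the closest GIC-compatible one before passing it up.
	 */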
	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

	return irq_chip_set_type_parent(d, type);
}

static struct irq_chip qcom_mpm_chip = {
	.name			= "mpm",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= qcom_mpm_mask,
	.irq_unmask		= qcom_mpm_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= qcom_mpm_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
};

static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
{
	struct mpm_gic_map *maps = priv->maps;
	int i;

	for (i = 0; i < priv->map_cnt; i++) {
		if (maps[i].pin == pin)
			return &maps[i];
	}

	return NULL;
}

static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs, void *data)
{
	struct qcom_mpm_priv *priv = domain->host_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	struct mpm_gic_map *map;
	irq_hw_number_t pin;
	unsigned int type;
	int  ret;

	ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
	if (ret)
		return ret;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
					    &qcom_mpm_chip, priv);
	if (ret)
		return ret;

	map = get_mpm_gic_map(priv, pin);
	if (map == NULL)
		return irq_domain_disconnect_hierarchy(domain->parent, virq);

	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

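	/*
	 * Build the three-cell GIC fwspec for the parent allocation: cell 0
	 * is 0 (GIC_SPI), cell 1 the SPI number from the pin map, cell 2 the
	 * trigger type folded above to what the GIC supports.
	 */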
	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = 0;
	parent_fwspec.param[1] = map->hwirq;
	parent_fwspec.param[2] = type;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops qcom_mpm_ops = {
	.alloc		= qcom_mpm_alloc,
	.free		= irq_domain_free_irqs_common,
	.translate	= irq_domain_translate_twocell,
};

/* Triggered by RPM when system resumes from deep sleep */
static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
{
	struct qcom_mpm_priv *priv = dev_id;
	unsigned long enable, pending;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int i, j;

	for (i = 0; i < priv->reg_stride; i++) {
		raw_spin_lock_irqsave(&priv->lock, flags);
		enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
		pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
		pending &= enable;
		raw_spin_unlock_irqrestore(&priv->lock, flags);

		for_each_set_bit(j, &pending, 32) {
			unsigned int pin = 32 * i + j;
			struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
			struct irq_data *d = &desc->irq_data;

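			/*
			 * An edge that fired while the SoC was asleep was
			 * seen only by MPM; replay it into the GIC as pending
			 * so the normal flow handler runs.  A level interrupt
			 * is still asserted and needs no replay.
			 */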
			if (!irqd_is_level_type(d))
				irq_set_irqchip_state(d->irq,
						IRQCHIP_STATE_PENDING, true);
			ret = IRQ_HANDLED;
		}
	}

	return ret;
}

static int mpm_pd_power_off(struct generic_pm_domain *genpd)
{
	struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
						  genpd);
	int i, ret;

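	/* Clear stale wakeup status before handing vMPM over to RPM */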
	for (i = 0; i < priv->reg_stride; i++)
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);

	/* Notify RPM to write vMPM into HW */
	ret = mbox_send_message(priv->mbox_chan, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (maps[i].hwirq == hwirq)
			return true;

	return false;
}

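/*
 * Illustrative fragment (hypothetical values, not taken from this file) of
 * the DT properties parsed below:
 *
 *	qcom,mpm-pin-count = <96>;
 *	qcom,mpm-pin-map = <2 275>,	(MPM pin 2 wakes GIC SPI 275)
 *			   <5 296>;	(MPM pin 5 wakes GIC SPI 296)
 */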
static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct device *dev = &pdev->dev;
	struct irq_domain *parent_domain;
	struct generic_pm_domain *genpd;
	struct qcom_mpm_priv *priv;
	unsigned int pin_cnt;
	int i, irq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
	if (ret) {
		dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
		return ret;
	}

	priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);

	ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
	if (ret < 0) {
		dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
		return ret;
	}

	if (ret % 2) {
		dev_err(dev, "invalid qcom,mpm-pin-map\n");
		return -EINVAL;
	}

	priv->map_cnt = ret / 2;
	priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
				  GFP_KERNEL);
	if (!priv->maps)
		return -ENOMEM;

	for (i = 0; i < priv->map_cnt; i++) {
		u32 pin, hwirq;

		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);

		if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
			dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
				 pin, hwirq);
			continue;
		}

		priv->maps[i].pin = pin;
		priv->maps[i].hwirq = hwirq;
	}

	raw_spin_lock_init(&priv->lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	for (i = 0; i < priv->reg_stride; i++) {
		qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
		qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

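	/*
	 * Register an IRQ-safe power domain whose ->power_off callback
	 * flushes vMPM to RPM; it is meant to be hooked into the CPU PM
	 * domain hierarchy so it runs when the last CPU power collapses.
	 */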
	genpd = &priv->genpd;
	genpd->flags = GENPD_FLAG_IRQ_SAFE;
	genpd->power_off = mpm_pd_power_off;

	genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
	if (!genpd->name)
		return -ENOMEM;

	ret = pm_genpd_init(genpd, NULL, false);
	if (ret) {
		dev_err(dev, "failed to init genpd: %d\n", ret);
		return ret;
	}

	ret = of_genpd_add_provider_simple(np, genpd);
	if (ret) {
		dev_err(dev, "failed to add genpd provider: %d\n", ret);
		goto remove_genpd;
	}

	priv->mbox_client.dev = dev;
	priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
	if (IS_ERR(priv->mbox_chan)) {
		ret = PTR_ERR(priv->mbox_chan);
		dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
		goto remove_genpd;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(dev, "failed to find MPM parent domain\n");
		ret = -ENXIO;
		goto free_mbox;
	}

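	/*
	 * Create the MPM domain as a child of the GIC domain and tag it with
	 * the DOMAIN_BUS_WAKEUP bus token, so it can be looked up as the
	 * wakeup-capable parent domain rather than the GIC itself.
	 */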
	priv->domain = irq_domain_create_hierarchy(parent_domain,
				IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
				of_node_to_fwnode(np), &qcom_mpm_ops, priv);
	if (!priv->domain) {
		dev_err(dev, "failed to create MPM domain\n");
		ret = -ENOMEM;
		goto free_mbox;
	}

	irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);

	ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
			       "qcom_mpm", priv);
	if (ret) {
		dev_err(dev, "failed to request irq: %d\n", ret);
		goto remove_domain;
	}

	return 0;

remove_domain:
	irq_domain_remove(priv->domain);
free_mbox:
	mbox_free_channel(priv->mbox_chan);
remove_genpd:
	pm_genpd_remove(genpd);
	return ret;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
MODULE_LICENSE("GPL v2");