cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

omap_irq.c (7743B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <drm/drm_vblank.h>

#include "omap_drv.h"

struct omap_irq_wait {
	struct list_head node;
	wait_queue_head_t wq;
	u32 irqmask;
	int count;
};

/* call with wait_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait;
	u32 irqmask = priv->irq_mask;

	assert_spin_locked(&priv->wait_lock);

	list_for_each_entry(wait, &priv->wait_list, node)
		irqmask |= wait->irqmask;

	DBG("irqmask=%08x", irqmask);

	dispc_write_irqenable(priv->dispc, irqmask);
}

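/*
 * Called from the IRQ handler for each waiter whose mask matches the
 * asserted interrupts: decrement the remaining count and wake the sleeper.
 */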
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
{
	wait->count--;
	wake_up(&wait->wq);
}

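/*
 * Allocate a waiter for @count occurrences of the interrupts in @irqmask,
 * add it to the wait list and make sure those interrupts are enabled.
 * Pair with omap_irq_wait(), which blocks until the count is reached and
 * then frees the waiter.
 */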
struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
		u32 irqmask, int count)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	unsigned long flags;

	init_waitqueue_head(&wait->wq);
	wait->irqmask = irqmask;
	wait->count = count;

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_add(&wait->node, &priv->wait_list);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return wait;
}

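/*
 * Sleep until the waiter set up by omap_irq_wait_init() has seen its
 * interrupts the requested number of times, or until @timeout (in jiffies)
 * expires.  The waiter is removed and freed in either case; returns 0 on
 * success and -1 on timeout.
 */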
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
		unsigned long timeout)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	int ret;

	ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_del(&wait->node);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	kfree(wait);

	return ret == 0 ? -1 : 0;
}

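/*
 * Enable or disable the FRAMEDONE interrupt for the channel driving @crtc.
 */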
int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);
	int framedone_irq =
		dispc_mgr_get_framedone_irq(priv->dispc, channel);

	DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);

	spin_lock_irqsave(&priv->wait_lock, flags);
	if (enable)
		priv->irq_mask |= framedone_irq;
	else
		priv->irq_mask &= ~framedone_irq;
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * enable_vblank - enable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Enable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 *
 * RETURNS
 * Zero on success, appropriate errno if the given @crtc's vblank
 * interrupt cannot be enabled.
 */
int omap_irq_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
						  channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * disable_vblank - disable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Disable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 */
void omap_irq_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
						   channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);
}

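/*
 * Report FIFO underflows on the GFX/VIDx pipelines, rate-limited so a
 * misbehaving display doesn't flood the log.  Only underflow bits that are
 * both asserted and currently enabled in the IRQ mask are reported.
 */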
static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
				    u32 irqstatus)
{
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	static const struct {
		const char *name;
		u32 mask;
	} sources[] = {
		{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
		{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
		{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
		{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
	};

	const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID1_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID2_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
	unsigned int i;

	spin_lock(&priv->wait_lock);
	irqstatus &= priv->irq_mask & mask;
	spin_unlock(&priv->wait_lock);

	if (!irqstatus)
		return;

	if (!__ratelimit(&_rs))
		return;

	DRM_ERROR("FIFO underflow on ");

	for (i = 0; i < ARRAY_SIZE(sources); ++i) {
		if (sources[i].mask & irqstatus)
			pr_cont("%s ", sources[i].name);
	}

	pr_cont("(0x%08x)\n", irqstatus);
}

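/* Report (rate-limited) OCP bus errors signalled by the DISPC. */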
static void omap_irq_ocp_error_handler(struct drm_device *dev,
	u32 irqstatus)
{
	if (!(irqstatus & DISPC_IRQ_OCP_ERR))
		return;

	dev_err_ratelimited(dev->dev, "OCP error\n");
}

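/*
 * Top-level DISPC interrupt handler: read and clear the status register,
 * dispatch vblank, sync-lost and framedone events to the CRTCs, report
 * errors, and wake any waiters registered via omap_irq_wait_init().
 */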
static irqreturn_t omap_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait, *n;
	unsigned long flags;
	unsigned int id;
	u32 irqstatus;

	irqstatus = dispc_read_irqstatus(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, irqstatus);
	dispc_read_irqstatus(priv->dispc);	/* flush posted write */

	VERB("irqs: %08x", irqstatus);

	for (id = 0; id < priv->num_pipes; id++) {
		struct drm_crtc *crtc = priv->pipes[id].crtc;
		enum omap_channel channel = omap_crtc_channel(crtc);

		if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
			drm_handle_vblank(dev, id);
			omap_crtc_vblank_irq(crtc);
		}

		if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
			omap_crtc_error_irq(crtc, irqstatus);

		if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
			omap_crtc_framedone_irq(crtc, irqstatus);
	}

	omap_irq_ocp_error_handler(dev, irqstatus);
	omap_irq_fifo_underflow(priv, irqstatus);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
		if (wait->irqmask & irqstatus)
			omap_irq_wait_handler(wait);
	}
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return IRQ_HANDLED;
}

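/* Per-plane FIFO underflow interrupt bits, indexed by DSS plane id. */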
static const u32 omap_underflow_irqs[] = {
	[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};

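/*
 * Set up the base interrupt mask (OCP errors, FIFO underflows for the
 * planes that exist, sync-lost for each manager), clear any stale status
 * bits and register the DISPC interrupt handler.
 */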
int omap_drm_irq_install(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
	unsigned int max_planes;
	unsigned int i;
	int ret;

	spin_lock_init(&priv->wait_lock);
	INIT_LIST_HEAD(&priv->wait_list);

	priv->irq_mask = DISPC_IRQ_OCP_ERR;

	max_planes = min(ARRAY_SIZE(priv->planes),
			 ARRAY_SIZE(omap_underflow_irqs));
	for (i = 0; i < max_planes; ++i) {
		if (priv->planes[i])
			priv->irq_mask |= omap_underflow_irqs[i];
	}

	for (i = 0; i < num_mgrs; ++i)
		priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);

	dispc_runtime_get(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, 0xffffffff);
	dispc_runtime_put(priv->dispc);

	ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
	if (ret < 0)
		return ret;

	priv->irq_enabled = true;

	return 0;
}

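/* Unregister the DISPC interrupt handler installed by omap_drm_irq_install(). */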
void omap_drm_irq_uninstall(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	if (!priv->irq_enabled)
		return;

	priv->irq_enabled = false;

	dispc_free_irq(priv->dispc, dev);
}