cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sdio_irq.c (8848B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/sdio_irq.c
 *
 * Author:      Nicolas Pitre
 * Created:     June 18, 2007
 * Copyright:   MontaVista Software Inc.
 *
 * Copyright 2008 Pierre Ossman
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_ops.h"
#include "core.h"
#include "card.h"

static int sdio_get_pending_irqs(struct mmc_host *host, u8 *pending)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, pending);
	if (ret) {
		pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
		       mmc_card_id(card), ret);
		return ret;
	}

	if (*pending && mmc_card_broken_irq_polling(card) &&
	    !(host->caps & MMC_CAP_SDIO_IRQ)) {
		unsigned char dummy;

		/* A fake interrupt could be created when we poll SDIO_CCCR_INTx
		 * register with a Marvell SD8797 card. A dummy CMD52 read to
		 * function 0 register 0xff can avoid this.
		 */
		mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
	}

	return 0;
}

static int process_sdio_pending_irqs(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int i, ret, count;
	bool sdio_irq_pending = host->sdio_irq_pending;
	unsigned char pending;
	struct sdio_func *func;

	/* Don't process SDIO IRQs if the card is suspended. */
	if (mmc_card_suspended(card))
		return 0;

	/* Clear the flag to indicate that we have processed the IRQ. */
	host->sdio_irq_pending = false;

	/*
	 * Optimization: if there is only one function interrupt registered
	 * and we know an IRQ was signaled, call its irq handler directly.
	 * Otherwise do the full probe.
	 */
	func = card->sdio_single_irq;
	if (func && sdio_irq_pending) {
		func->irq_handler(func);
		return 1;
	}

	ret = sdio_get_pending_irqs(host, &pending);
	if (ret)
		return ret;

	count = 0;
	for (i = 1; i <= 7; i++) {
		if (pending & (1 << i)) {
			func = card->sdio_func[i - 1];
			if (!func) {
				pr_warn("%s: pending IRQ for non-existent function\n",
					mmc_card_id(card));
				ret = -EINVAL;
			} else if (func->irq_handler) {
				func->irq_handler(func);
				count++;
			} else {
				pr_warn("%s: pending IRQ with no handler\n",
					sdio_func_id(func));
				ret = -EINVAL;
			}
		}
	}

	if (count)
		return count;

	return ret;
}

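/*
 * Workqueue-based SDIO IRQ path, used when the host controller sets
 * MMC_CAP2_SDIO_IRQ_NOTHREAD: the host driver calls sdio_signal_irq() when
 * it detects a card interrupt, and the queued sdio_irq_work() runs the
 * registered function handlers, acknowledging the interrupt through
 * ->ack_sdio_irq() once nothing is left pending.
 */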
static void sdio_run_irqs(struct mmc_host *host)
{
	mmc_claim_host(host);
	if (host->sdio_irqs) {
		process_sdio_pending_irqs(host);
		if (!host->sdio_irq_pending)
			host->ops->ack_sdio_irq(host);
	}
	mmc_release_host(host);
}

void sdio_irq_work(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, sdio_irq_work.work);

	sdio_run_irqs(host);
}

void sdio_signal_irq(struct mmc_host *host)
{
	host->sdio_irq_pending = true;
	queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);

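/*
 * Kernel-thread-based SDIO IRQ path ("ksdioirqd/<host>"), used when the host
 * does not set MMC_CAP2_SDIO_IRQ_NOTHREAD: the thread either sleeps until the
 * host signals a card interrupt (MMC_CAP_SDIO_IRQ) or falls back to polling
 * the card, adapting its polling period to recent interrupt activity.
 */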
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	unsigned long period, idle_period;
	int ret;

	sched_set_fifo_low(current);

	/*
	 * We want to allow SDIO cards to work even on non-SDIO-aware
	 * hosts.  One thing a non-SDIO host cannot do is asynchronous
	 * notification of pending SDIO card interrupts, hence we poll
	 * for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on the drivers' behalf for a couple
		 * of reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, NULL,
				       &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
			atomic_set(&host->sdio_irq_thread_abort, 0);
			host->sdio_irq_thread =
				kthread_run(sdio_irq_thread, host,
					    "ksdioirqd/%s", mmc_hostname(host));
			if (IS_ERR(host->sdio_irq_thread)) {
				int err = PTR_ERR(host->sdio_irq_thread);
				host->sdio_irqs--;
				return err;
			}
		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
			host->ops->enable_sdio_irq(host, 1);
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (host->sdio_irqs < 1)
		return -EINVAL;

	if (!--host->sdio_irqs) {
		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
			atomic_set(&host->sdio_irq_thread_abort, 1);
			kthread_stop(host->sdio_irq_thread);
		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
			host->ops->enable_sdio_irq(host, 0);
		}
	}

	return 0;
}

/* If there is only one function interrupt registered, set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1) {
		for (i = 0; i < card->sdio_funcs; i++) {
			func = card->sdio_func[i];
			if (func && func->irq_handler) {
				card->sdio_single_irq = func;
				break;
			}
		}
	}
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler should not
 *	call sdio_claim_host() or sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	if (!func)
		return -EINVAL;

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);

/**
 *	sdio_release_irq - release the IRQ for a SDIO function
 *	@func: SDIO function
 *
 *	Disable and release the IRQ for the given SDIO function.
 */
int sdio_release_irq(struct sdio_func *func)
{
	int ret;
	unsigned char reg;

	if (!func)
		return -EINVAL;

	pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		func->irq_handler = NULL;
		sdio_card_irq_put(func->card);
		sdio_single_irq_set(func->card);
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg &= ~(1 << func->num);

	/* Disable master interrupt with the last function interrupt */
	if (!(reg & 0xFE))
		reg = 0;

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);
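For reference, below is a minimal sketch of how an SDIO function driver would typically consume this interface. The my_sdio_* names are hypothetical and only the sdio_* calls come from the MMC core; the handler runs with the host already claimed, so it must not call sdio_claim_host() or sdio_release_host() itself.

/* Hypothetical SDIO function driver fragment (illustrative only). */
#include <linux/mmc/sdio_func.h>

static void my_sdio_irq_handler(struct sdio_func *func)
{
	/* Keep this short: the host lock is held while it runs. */
	dev_dbg(&func->dev, "SDIO card interrupt\n");
}

static int my_sdio_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (ret)
		goto out;
	ret = sdio_claim_irq(func, my_sdio_irq_handler);
	if (ret)
		sdio_disable_func(func);
out:
	sdio_release_host(func);
	return ret;
}

static void my_sdio_remove(struct sdio_func *func)
{
	sdio_claim_host(func);
	sdio_release_irq(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}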