cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

csio_isr.c (15197B)


/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

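/*
 * csio_nondata_isr() - Non-data MSIX ISR
 * @irq: IRQ number.
 * @dev_id: The HW module (struct csio_hw *).
 *
 * Handles slow-path interrupts and mailbox completions. If the mailbox
 * handler succeeded and no FW event work is pending yet, the event
 * queue worker is scheduled.
 */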
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);

	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared between the MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
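	/*
	 * CSIO_HWF_FWEVT_PENDING is set under hw->lock, so at most one
	 * instance of the event queue worker is scheduled at a time;
	 * the flag is presumably cleared again by the worker once it
	 * has run.
	 */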
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: IRQ number.
 * @dev_id: The HW module (struct csio_hw *).
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The work request (unused).
 * @len: Length of the WR (unused).
 * @flb: Freelist buffer array (unused).
 * @priv: Private data (unused).
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			   struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: The list on which completed ioreqs are queued for their
 *          completion callbacks.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				    subop ? "Close" : "Abort",
				    ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os whose aborts the driver
			 * believes have timed out. If FW completes the abort
			 * at the exact moment the driver detects the abort
			 * timeout, the following check prevents scsi_done
			 * from being called twice for the same command: once
			 * from the eh_abort_handler, and again from
			 * csio_scsi_isr_handler(). It also avoids having to
			 * check csio_scsi_cmnd(req) for NULL in the fast
			 * path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the given SCSI IQ by calling
 * csio_wr_process_iq(). Completed requests are collected on a local
 * cbfn_q list, their io_cbfn callbacks are invoked, and the ioreqs
 * are then returned to the freelist. This routine is shared between
 * the MSIX and INTx paths.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
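		/*
		 * sm.sm_list is the first member of struct csio_ioreq,
		 * so the list_head pointer can be cast directly to the
		 * ioreq below.
		 */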
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: IRQ number.
 * @dev_id: The SCSI ingress queue (struct csio_q *).
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue (unused).
 * @len: Length of the WR (unused).
 * @flb: Freelist buffer array (unused).
 * @priv: The SCSI ingress queue (struct csio_q *).
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: IRQ number.
 * @dev_id: The HW module (struct csio_hw *).
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}

static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

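	/*
	 * Vector names encode the PCI bus:dev:func and the vector's
	 * role, e.g. "csio-02:00:0-nondata" or "csio-02:00:0-scsi3",
	 * so they are easy to tell apart in /proc/interrupts.
	 */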
	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}

int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;
	struct pci_dev *pdev = hw->pdev;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
				hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
				KBUILD_MODNAME, hw);
		if (rv) {
			csio_err(hw, "Failed to allocate interrupt line.\n");
			goto out_free_irqs;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = hw;

	rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       pci_irq_vector(pdev, k), rv);
				goto out_free_irqs;
			}

			entryp[k].dev_id = q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
	return 0;

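/* Unwind: free only the k vectors that were successfully requested. */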
out_free_irqs:
	for (i = 0; i < k; i++)
		free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
	pci_free_irq_vectors(hw->pdev);
	return -EINVAL;
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

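	/*
	 * Trim one qset at a time, round-robin across the ports, until
	 * the total fits within cnt; no port is reduced below one qset.
	 */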
	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

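/*
 * csio_calc_sets() - ->calc_sets() callback for IRQ affinity.
 * @affd: Affinity descriptor; ->priv points at the HW module.
 * @nvecs: Number of data vectors granted by the PCI core.
 *
 * Spreads the granted vectors into one affinity set per port, or into
 * a single set when fewer vectors than ports were granted.
 */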
static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct csio_hw *hw = affd->priv;
	u8 i;

	if (!nvecs)
		return;

	if (nvecs < hw->num_pports) {
		affd->nr_sets = 1;
		affd->set_size[0] = nvecs;
		return;
	}

	affd->nr_sets = hw->num_pports;
	for (i = 0; i < hw->num_pports; i++)
		affd->set_size[i] = nvecs / hw->num_pports;
}

    495
    496static int
    497csio_enable_msix(struct csio_hw *hw)
    498{
    499	int i, j, k, n, min, cnt;
    500	int extra = CSIO_EXTRA_VECS;
    501	struct csio_scsi_cpu_info *info;
    502	struct irq_affinity desc = {
    503		.pre_vectors = CSIO_EXTRA_VECS,
    504		.calc_sets = csio_calc_sets,
    505		.priv = hw,
    506	};
    507
    508	if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
    509		return -ENOSPC;
    510
    511	min = hw->num_pports + extra;
    512	cnt = hw->num_sqsets + extra;
    513
    514	/* Max vectors required based on #niqs configured in fw */
    515	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
    516		cnt = min_t(uint8_t, hw->cfg_niq, cnt);
    517
    518	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
    519
    520	cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
    521			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
    522	if (cnt < 0)
    523		return cnt;
    524
    525	if (cnt < (hw->num_sqsets + extra)) {
    526		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
    527		csio_reduce_sqsets(hw, cnt - extra);
    528	}
    529
    530	/* Distribute vectors */
    531	k = 0;
    532	csio_set_nondata_intr_idx(hw, k);
    533	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
    534	csio_set_fwevt_intr_idx(hw, k++);
    535
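	/*
	 * Map each port's qsets onto that port's block of vectors; the
	 * modulo wraps round-robin if a port has more qsets than the
	 * max_cpus vectors it was granted.
	 */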
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = n;
		}

		k += info->max_cpus;
	}

	return 0;
}

    549
    550void
    551csio_intr_enable(struct csio_hw *hw)
    552{
    553	hw->intr_mode = CSIO_IM_NONE;
    554	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
    555
    556	/* Try MSIX, then MSI or fall back to INTx */
    557	if ((csio_msi == 2) && !csio_enable_msix(hw))
    558		hw->intr_mode = CSIO_IM_MSIX;
    559	else {
    560		/* Max iqs required based on #niqs configured in fw */
    561		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
    562			!csio_is_hw_master(hw)) {
    563			int extra = CSIO_EXTRA_MSI_IQS;
    564
    565			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
    566				csio_dbg(hw, "Reducing sqsets to %d\n",
    567					 hw->cfg_niq - extra);
    568				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
    569			}
    570		}
    571
    572		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
    573			hw->intr_mode = CSIO_IM_MSI;
    574		else
    575			hw->intr_mode = CSIO_IM_INTX;
    576	}
    577
    578	csio_dbg(hw, "Using %s interrupt mode.\n",
    579		(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
    580		((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
    581}
    582
    583void
    584csio_intr_disable(struct csio_hw *hw, bool free)
    585{
    586	csio_hw_intr_disable(hw);
    587
    588	if (free) {
    589		int i;
    590
    591		switch (hw->intr_mode) {
    592		case CSIO_IM_MSIX:
    593			for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
    594				free_irq(pci_irq_vector(hw->pdev, i),
    595					 hw->msix_entries[i].dev_id);
    596			}
    597			break;
    598		case CSIO_IM_MSI:
    599		case CSIO_IM_INTX:
    600			free_irq(pci_irq_vector(hw->pdev, 0), hw);
    601			break;
    602		default:
    603			break;
    604		}
    605	}
    606
    607	pci_free_irq_vectors(hw->pdev);
    608	hw->intr_mode = CSIO_IM_NONE;
    609	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
    610}