cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qla_inline.h (11358B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
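
/*
 * Illustrative worked example (not part of the original source): per the
 * arithmetic above, the first IOCB covers one descriptor and each extra
 * Continuation Type 1 IOCB covers five more. For dsds = 12 this gives
 * 1 + (11 / 5) + 1 = 4 IOCB entries, the final +1 absorbing the remainder.
 */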

/*
 * qla2x00_debounce_register
 *      Debounce a register: read it repeatedly until two consecutive
 *      reads return the same value, so a value caught mid-update is
 *      never used.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}

static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}
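
/*
 * Illustrative note (not part of the original source): both helpers above
 * walk the buffer in 32-bit words (bsize >> 2), so any trailing bytes beyond
 * a multiple of four are ignored. host_to_fcp_swap() byte-swaps each word in
 * place with swab32() (e.g. bytes 00 01 02 03 become 03 02 01 00), while
 * host_to_adap() copies each word into a little-endian destination with
 * cpu_to_le32().
 */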

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, port_dstate_str[old_val & mask],
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}
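
/*
 * Illustrative note (not part of the original source): shadow_disc_state
 * keeps a packed history of recent discovery states, four bits per entry.
 * Each successful cmpxchg() shifts the previous value left by four and ORs
 * in the new state, e.g. a history of 0x21 followed by state 5 becomes
 * 0x215; (old_val & mask) therefore recovers the immediately preceding
 * state for the debug message.
 */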

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}
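
/*
 * Illustrative summary (not part of the original source): the switch above
 * maps the ql2xenablehba_err_chk module parameter onto protection ops:
 * READ_STRIP/WRITE_INSERT require level >= 1, READ_PASS/WRITE_PASS require
 * level >= 2, and READ_INSERT/WRITE_STRIP always enable HBA error checking.
 */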

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	/* ref : INIT - normal flow */
	kref_init(&sp->cmd_kref);
	INIT_LIST_HEAD(&sp->elem);
}

static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
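
/*
 * Illustrative note (not part of the original source): qla2xxx_get_qpair_sp()
 * and qla2xxx_rel_qpair_sp() are a matched pair. A successful get marks the
 * qpair busy and hands out a zeroed SRB from the mempool; the release points
 * sp->done/sp->free at the *_warning stubs before returning the SRB to the
 * mempool, so a late call through those hooks is flagged rather than silently
 * running a stale handler, and then clears the busy mark.
 */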

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;
	struct qla_qpair *qpair;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}

static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	       "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	       fcport->port_name, sts_qual, qual * 100);
}
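
/*
 * Illustrative worked example (not part of the original source): for
 * sts_qual = 0x4005, scope = (0x4005 & 0xc000) >> 14 = 1 and qual = 0x0005,
 * i.e. a requested hold-off of 5 * 100 ms = 500 ms, so the timestamp becomes
 * jiffies + 5 * HZ / 10. The cap at SQ_MAX_WAIT_TIME (600) limits any
 * request to 60 seconds.
 */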

static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data =
	    ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}
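
/*
 * Illustrative note (not part of the original source): only bit 6 of the
 * NVRAM flag byte matters here. For example, a byte value of 0x40 has bit 6
 * set, so the function returns FC4_PRIORITY_FCP; with bit 6 clear it returns
 * FC4_PRIORITY_NVME.
 */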

enum {
	RESOURCE_NONE,
	RESOURCE_INI,
};

static inline int
qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
		qp->fwres.iocbs_used += iores->iocb_cnt;
		return 0;
	} else {
		/* no need to acquire qpair lock. It's just rough calculation */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
			qp->fwres.iocbs_used += iores->iocb_cnt;
			return 0;
		} else {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}
}
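
/*
 * Illustrative note (not part of the original source): the check is
 * two-tiered. The request is first charged against this queue pair's own
 * soft limit (iocbs_qp_limit); only if that would be exceeded is usage
 * summed across all queue pairs and compared against the adapter-wide
 * iocbs_limit, with -ENOSPC returned when both budgets are exhausted.
 */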

static inline void
qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	switch (iores->res_type) {
	case RESOURCE_NONE:
		break;
	default:
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* should not happen */
			qp->fwres.iocbs_used = 0;
		}
		break;
	}
	iores->res_type = RESOURCE_NONE;
}

#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
 * qla2x00_isp_reg_stat
 *
 * Description:
 *        Read the host status register of ISP before aborting the command.
 *
 * Input:
 *       ha = pointer to host adapter structure.
 *
 * Returns:
 *       1 if the register reads back as ISP_REG_DISCONNECT (i.e. the
 *       device has disconnected from the PCI bus), 0 otherwise.
 **************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((rd_reg_dword(&reg->host_status)) ==
			ISP_REG_DISCONNECT);
}

static inline
bool qla_pci_disconnected(struct scsi_qla_host *vha,
			  struct device_reg_24xx __iomem *reg)
{
	uint32_t stat;
	bool ret = false;

	stat = rd_reg_dword(&reg->host_status);
	if (stat == 0xffffffff) {
		ql_log(ql_log_info, vha, 0x8041,
		       "detected PCI disconnect.\n");
		qla_schedule_eeh_work(vha);
		ret = true;
	}
	return ret;
}

static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	if (wwn_to_u64(fcport->port_name) <
		wwn_to_u64(fcport->vha->port_name))
		return true;
	else
		return false;
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}