cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

zfcp_qdio.c (16643B)


// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

#define ZFCP_QDIO_REQUEST_RESCAN_MSECS	(MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS	MSEC_PER_SEC

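/*
 * React to a QDIO error reported by one of the queue handlers: an
 * SLSB-state error indicates corrupted queue state and forces an
 * adapter shutdown (preceded by a hardware logging trigger via SIOSL),
 * while any other error is handled by reopening the adapter through
 * error recovery.
 */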
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

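/*
 * Zero @cnt SBALs of @sbal, starting at index @first and wrapping
 * around at the end of the circular queue.
 */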
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

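/*
 * Sample the request-queue fill level to maintain the utilization
 * statistics exposed via sysfs.  Shifting the TOD clock delta right by
 * 12 bits converts it to microseconds (bit 51 of the s390 TOD clock
 * ticks once per microsecond).
 */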
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

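/*
 * Request-queue (output-queue) completion handler registered with the
 * QDIO layer.  Normal completions are reaped by the request tasklet
 * instead, so any invocation here is treated as an error.
 */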
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}

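/*
 * Reap completed request-queue SBALs: zero them for reuse, update the
 * utilization statistics, return them to the free pool and wake up any
 * waiters.  While SBALs remain in use, re-arm the rescan timer so that
 * no completion is lost if no further interrupt arrives.
 */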
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* cleanup all SBALs being program-owned now */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}

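/* Fallback timer: kick the request tasklet to rescan for completions. */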
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
	struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);

	tasklet_schedule(&qdio->request_tasklet);
}

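/*
 * Response-queue (input-queue) handler.  For each SBAL returned by the
 * QDIO layer, look up and complete the pending FSF requests, then hand
 * the buffers back to the input queue.  On error, first collect the
 * affected SBALs for debug tracing when the hardware data router
 * (multibuffer mode) is active.
 */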
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

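/*
 * Central irq-poll tasklet: process response-queue completions, kick
 * the request tasklet if request-queue SBALs are still outstanding,
 * and re-enable the QDIO interrupt.  If more work arrived in the
 * meantime, reschedule instead.
 */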
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the Response Queue: */
	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}

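/* qdio irq_poll callback: defer all work to the irq tasklet. */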
static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

	tasklet_schedule(&qdio->irq_tasklet);
}

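/*
 * Close the current SBAL of a request and chain to the next one: mark
 * the current SBALE as last, flag the SBAL as continued, advance to
 * the next SBAL and propagate the storage-block type.  Returns the
 * first SBALE of the new SBAL, or NULL if the request would exceed its
 * SBAL limit.
 */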
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

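/*
 * Return the next free SBALE of a request, chaining into a new SBAL
 * when the current one is full.
 */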
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_phys(sg);
		sbale->length = sg->length;
	}
	return 0;
}

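/*
 * Wait condition for zfcp_qdio_sbal_get(): free SBALs are available,
 * or the QDIO path is down (in which case waiting is pointless).
 */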
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}

/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * Request Queue completion processing in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
					       q_req->sbal_first, sbal_number,
					       NULL);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/*
	 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
	 * during qdio_shutdown().
	 */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	del_timer_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

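/*
 * Propagate the qdio limits to the SCSI midlayer: one SBALE carries
 * one scatter-gather entry of at most one 4 KiB page, so max_sectors
 * is max_sbale_per_req pages, i.e. max_sbale_per_req * 8 512-byte
 * sectors.
 */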
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
				&qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for Request Queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

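/*
 * Counterpart of zfcp_qdio_setup(): kill the tasklets and release the
 * QDIO data structures and queue buffers.
 */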
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	tasklet_kill(&qdio->irq_tasklet);
	tasklet_kill(&qdio->request_tasklet);

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

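/*
 * Allocate and initialize the per-adapter qdio state.  Both tasklets
 * start disabled and are only enabled once the queues are operational
 * in zfcp_qdio_open().
 */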
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);
	timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
	tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
	tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The status flag is checked and set without further locking; should
 * two callers race, logging is merely triggered a second time, which
 * is harmless.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
				&adapter->status);
}