cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

device_pgid.c (17979B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  CCW device PGID and path verification I/O handling.
 *
 *    Copyright IBM Corp. 2002, 2009
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)
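
/*
 * Overall flow (summary of the code below): path verification starts
 * with SENSE PGID on each path (snid_*), analyzes the returned PGID
 * data (pgid_analyze, pgid_to_donepm), then issues SET PGID to
 * establish or resign path groups where needed (spid_*). Devices that
 * do not support pathgrouping fall back to a plain NOOP probe per
 * path (nop_*). Each request is bounded by PGID_TIMEOUT per attempt
 * and at most PGID_RETRIES retries.
 */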

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
			 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
			 sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp->cmd_code	= CCW_CMD_NOOP;
	cp->cda		= 0;
	cp->count	= 0;
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}
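
/*
 * Path-walk convention used throughout this file: req->lpm holds a
 * single-bit path mask, starting at 0x80 (channel path position 0)
 * and shifted right after each completed path. lpm_adjust() advances
 * the bit to the next path that is set in the given candidate mask,
 * e.g.:
 *
 *	req->lpm = 0x80;			// start at path 0
 *	req->lpm = lpm_adjust(req->lpm, mask);	// first usable path
 *	...					// perform I/O there
 *	req->lpm >>= 1;				// move on
 *
 * and returns 0 once no candidate path remains.
 */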

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}
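
/*
 * Per-path results are accumulated in bit masks: sch->vpm collects
 * verified paths, path_noirq_mask paths that timed out without an
 * interrupt, and path_notoper_mask paths reported not operational.
 * The SNID and SPID callbacks below use the same scheme.
 */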

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->dma_area->pgid[i];

	pgid->inf.fc	= fn;
	cp->cmd_code	= CCW_CMD_SET_PGID;
	cp->cda		= (u32) (addr_t) pgid;
	cp->count	= sizeof(*pgid);
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}
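
/*
 * pathmask_to_pos() maps the single bit set in req->lpm to its path
 * position 0-7, selecting the per-path entry in the dma_area->pgid
 * array. The same indexing ties SENSE PGID results to the path they
 * were read from (see snid_build_cp below).
 */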

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leaving unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam;
	req->callback	= pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}
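
/*
 * Note that, unlike the other requests in this file, the wipeout
 * request does not set req->singlepath, so the DISBAND goes out with
 * the full path-available mask rather than one path at a time; the
 * per-path group state is unknown at this point anyway.
 */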

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}
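
/*
 * Fallback ladder on -EOPNOTSUPP: first retry with multipathing
 * disabled, then with pathgrouping disabled altogether, restarting
 * verification each time. With flags.pgroup cleared, verify_start()
 * switches to the NOOP-based probe.
 */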

static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= 0x80;
	req->singlepath	= 1;
	req->callback	= spid_callback;
	spid_do(cdev);
}

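/*
 * Both helpers below ignore byte 0 of the PGID, which holds the
 * function-code/path-state byte (union inf): it differs per path and
 * is not part of the group identifier proper.
 */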
static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->dma_area->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}
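
/*
 * On return, *p points to the first valid non-reset PGID found, or to
 * the channel subsystem's global PGID if every path came back reset;
 * *mismatch is set if two valid PGIDs disagree, while *reserved and
 * *reset are per-path masks.
 */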

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->dma_area->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->dma_area->pgid[i], pgid,
		       sizeof(struct pgid));
}
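
/*
 * pgid_fill() copies the reference PGID into all eight per-path slots
 * so that any subsequent SET PGID sends the same identifier on every
 * path.
 */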

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}
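
/*
 * Result summary for snid_done(): every usable path reserved by
 * another system yields -EUSERS; inconsistent PGIDs across paths
 * yield -EOPNOTSUPP and a retry without pathgrouping; otherwise
 * pgid_todo_mask keeps only the paths that still need a SET PGID.
 */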

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code	= CCW_CMD_SENSE_PGID;
	cp->cda		= (u32) (addr_t) &cdev->private->dma_area->pgid[i];
	cp->count	= sizeof(struct pgid);
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->dma_area->pgid, 0,
	       sizeof(cdev->private->dma_area->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= 0x80;
	req->singlepath	= 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback	= snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter	= nop_filter;
		req->callback	= nop_callback;
		nop_do(cdev);
	}
}
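
/*
 * Note that verify_start() deliberately leaves path_noirq_mask alone:
 * it is cleared only in ccw_device_verify_start(), so a restart via
 * pgid_wipeout_callback() keeps skipping paths that previously timed
 * out.
 */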

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->singlepath	= 1;
	req->callback	= disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

struct stlck_data {
	struct completion done;
	int rc;
};

static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}
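
/*
 * CCW_FLAG_CC on the first CCW chains the RELEASE directly after the
 * STLCK (unconditional reserve), so both commands run as a single
 * channel program: the reservation is stolen and immediately released
 * again. Each command gets its own 32-byte buffer half.
 */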

static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->data	= data;
	req->callback	= stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}
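
/*
 * Synchronous wrapper sequence: allocate a 64-byte DMA buffer (split
 * into the two 32-byte CCW buffers), enable the subchannel, start the
 * chained STLCK+RELEASE, and block on the completion. A signal cancels
 * the request but still waits for the callback, so data.rc is always
 * valid by the time it is read. The device is left in BOXED state.
 */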