device_status.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2002
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

/*
 * Check for any kind of channel or interface control check but don't
 * issue the message for the console device.
 */
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	char dbf_text[15];

	if (!scsw_is_valid_cstat(&irb->scsw) ||
	    !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
	      SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
		return;
	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
		      "received"
		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
		      ": %02X sch_stat : %02X\n",
		      cdev->private->dev_id.devno, sch->schid.ssid,
		      sch->schid.sch_no,
		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
	sprintf(dbf_text, "chk%x", sch->schid.sch_no);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}

/*
 * Some paths are no longer operational (the pno bit in the scsw is set).
 */
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		goto doverify;

	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
		      "not operational \n", __func__,
		      sch->schid.ssid, sch->schid.sch_no,
		      sch->schib.pmcw.pnom);

	sch->lpm &= ~sch->schib.pmcw.pnom;
doverify:
	cdev->private->flags.doverify = 1;
}

/*
 * Copy valid bits from the extended control word to device irb.
 */
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Copy extended control bit if it is valid... yes, there
	 * are conditions that have to be met for the extended control
	 * bit to have meaning. Sick.
	 */
	cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
		cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
	/* Check if extended control word is valid. */
	if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
		return;
	/* Copy concurrent sense / model dependent information. */
	memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
}

/*
 * Check if extended status word is valid.
 */
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
	if (!irb->scsw.cmd.eswf &&
	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
		return 0;
	if (irb->scsw.cmd.stctl ==
	    (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
	    !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return 0;
	return 1;
}

/*
 * Copy valid bits from the extended status word to device irb.
 */
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;
	struct sublog *cdev_sublog, *sublog;

	if (!ccw_device_accumulate_esw_valid(irb))
		return;

	cdev_irb = &cdev->private->dma_area->irb;

	/* Copy last path used mask. */
	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;

	/* Copy subchannel logout information if esw is of format 0. */
	if (irb->scsw.cmd.eswf) {
		cdev_sublog = &cdev_irb->esw.esw0.sublog;
		sublog = &irb->esw.esw0.sublog;
		/* Copy extended status flags. */
		cdev_sublog->esf = sublog->esf;
		/*
		 * Copy fields that have a meaning for channel data check,
		 * channel control check and interface control check.
		 */
		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
					   SCHN_STAT_CHN_CTRL_CHK |
					   SCHN_STAT_INTF_CTRL_CHK)) {
			/* Copy ancillary report bit. */
			cdev_sublog->arep = sublog->arep;
			/* Copy field-validity-flags. */
			cdev_sublog->fvf = sublog->fvf;
			/* Copy storage access code. */
			cdev_sublog->sacc = sublog->sacc;
			/* Copy termination code. */
			cdev_sublog->termc = sublog->termc;
			/* Copy sequence code. */
			cdev_sublog->seqc = sublog->seqc;
		}
		/* Copy device status check. */
		cdev_sublog->devsc = sublog->devsc;
		/* Copy secondary error. */
		cdev_sublog->serr = sublog->serr;
		/* Copy i/o-error alert. */
		cdev_sublog->ioerr = sublog->ioerr;
		/* Copy channel path timeout bit. */
		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
		/* Copy failing storage address validity flag. */
		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
		if (cdev_irb->esw.esw0.erw.fsavf) {
			/* ... and copy the failing storage address. */
			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
			       sizeof(irb->esw.esw0.faddr));
			/* ... and copy the failing storage address format. */
			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
		}
		/* Copy secondary ccw address validity bit. */
		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
		if (irb->esw.esw0.erw.scavf)
			/* ... and copy the secondary ccw address. */
			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;

	}
	/* FIXME: DCTI for format 2? */

	/* Copy authorization bit. */
	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
	/* Copy path verification required flag. */
	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
	if (irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
	/* Copy concurrent sense bit. */
	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
	if (irb->esw.esw0.erw.cons)
		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}

/*
 * Accumulate status from irb to devstat.
 */
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;

	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
	 */
	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
		ccw_device_path_notoper(cdev);
	/* No irb accumulation for transport mode irbs. */
	if (scsw_is_tm(&irb->scsw)) {
		memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
		return;
	}
	/*
	 * Don't accumulate unsolicited interrupts.
	 */
	if (!scsw_is_solicited(&irb->scsw))
		return;

	cdev_irb = &cdev->private->dma_area->irb;

	/*
	 * If the clear function had been performed, all formerly pending
	 * status at the subchannel has been cleared and we must not pass
	 * intermediate accumulated status to the device driver.
	 */
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	/* Copy bits which are valid only for the start function. */
	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
		/* Copy key. */
		cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
		/* Copy suspend control bit. */
		cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
		/* Accumulate deferred condition code. */
		cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
		/* Copy ccw format bit. */
		cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
		/* Copy prefetch bit. */
		cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
		/* Copy initial-status-interruption-control. */
		cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
		/* Copy address limit checking control. */
		cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
		/* Copy suppress suspend bit. */
		cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
	}

	/* Take care of the extended control bit and extended control word. */
	ccw_device_accumulate_ecw(cdev, irb);

	/* Accumulate function control. */
	cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
	/* Copy activity control. */
	cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
	/* Accumulate status control. */
	cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
	/*
	 * Copy ccw address if it is valid. This is a bit simplified
	 * but should be close enough for all practical purposes.
	 */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
	    ((irb->scsw.cmd.stctl ==
	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
	     (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
	     (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
	    (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
	/* Accumulate device status, but not the device busy flag. */
	cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
	/* dstat is not always valid. */
	if (irb->scsw.cmd.stctl &
	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
		cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
	/* Accumulate subchannel status. */
	cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
	/* Copy residual count if it is valid. */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	    (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
	     == 0)
		cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;

	/* Take care of bits in the extended status word. */
	ccw_device_accumulate_esw(cdev, irb);

	/*
	 * Check whether we must issue a SENSE CCW ourselves if there is no
	 * concurrent sense facility installed for the subchannel.
	 * No sense is required if no delayed sense is pending
	 * and we did not get a unit check without sense information.
	 *
	 * Note: We should check for ioinfo[irq]->flags.consns but VM
	 *	 violates the ESA/390 architecture and doesn't present an
	 *	 operand exception for virtual devices without concurrent
	 *	 sense facility available/supported when enabling the
	 *	 concurrent sense facility.
	 */
	if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
	    !(cdev_irb->esw.esw0.erw.cons))
		cdev->private->flags.dosense = 1;
}

/*
 * Do a basic sense.
 */
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
	struct subchannel *sch;
	struct ccw1 *sense_ccw;
	int rc;

	sch = to_subchannel(cdev->dev.parent);

	/* A sense is required; can we do it now? */
	if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
		/*
		 * We received a unit check but we have no final
		 * status yet, therefore we must delay the SENSE
		 * processing. We must not report this intermediate
		 * status to the device interrupt handler.
		 */
		return -EBUSY;

	/*
	 * We have ending status but no sense information. Do a basic sense.
	 */
	sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
	sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
	sense_ccw->count = SENSE_MAX_COUNT;
	sense_ccw->flags = CCW_FLAG_SLI;

	rc = cio_start(sch, sense_ccw, 0xff);
	if (rc == -ENODEV || rc == -EACCES)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return rc;
}

/*
 * Add information from basic sense to devstat.
 */
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
	 */
	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
		ccw_device_path_notoper(cdev);

	if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
		cdev->private->flags.dosense = 0;
	}
	/* Check if path verification is required. */
	if (ccw_device_accumulate_esw_valid(irb) &&
	    irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
}

/*
 * This function accumulates the status into the private devstat and
 * starts a basic sense if one is needed.
 */
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
	ccw_device_accumulate_irb(cdev, irb);
	if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
		return -EBUSY;
	/* Check for basic sense. */
	if (cdev->private->flags.dosense &&
	    !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
		cdev->private->flags.dosense = 0;
		return 0;
	}
	if (cdev->private->flags.dosense) {
		ccw_device_do_sense(cdev, irb);
		return -EBUSY;
	}
	return 0;
}