device_fsm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * finite state machine for device handling
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %lx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa ==
		    &private->dma_area->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
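/*
 * Note: the dump above is only produced when the kernel was booted with
 * "ccw_timeout_log" on the command line (see the __setup() hook above).
 */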
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0)
		del_timer(&cdev->private->timer);
	else
		mod_timer(&cdev->private->timer, jiffies + expires);
}

int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);

	if (ret == -EIO)
		CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);

	return ret;
}

void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
	cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
	cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
	cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
}

int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type ==
		cdev->private->dma_area->senseid.cu_type &&
		cdev->id.cu_model ==
		cdev->private->dma_area->senseid.cu_model &&
		cdev->id.dev_type ==
		cdev->private->dma_area->senseid.dev_type &&
		cdev->id.dev_model ==
		cdev->private->dma_area->senseid.dev_model;
}
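/*
 * Illustrative sketch (not from this file): FSM code that wants an upper
 * bound on an operation arms the timer and receives DEV_EVENT_TIMEOUT via
 * ccw_device_timeout() above if it does not disarm the timer in time:
 *
 *	ccw_device_set_timeout(cdev, 3 * HZ);	// arm: fire in ~3 seconds
 *	...					// start the I/O
 *	ccw_device_set_timeout(cdev, 0);	// disarm on completion
 */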
/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i. e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	int ret = -EINVAL;

	if (!cdev->drv)
		goto out;
	if (!cdev->online)
		goto out;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	if (!cdev->drv->notify) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cdev->drv->notify(cdev, event))
		ret = NOTIFY_OK;
	else
		ret = NOTIFY_BAD;
out:
	return ret;
}

static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
		/* Reenable channel measurements, if needed. */
		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
		/* Save indication for new paths. */
		cdev->private->path_new_mask = sch->vpm;
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
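/*
 * Illustrative sketch (assumed driver code, not from this file): a CCW
 * driver's ->notify callback decides whether to keep the device; a nonzero
 * return is mapped to NOTIFY_OK by ccw_device_notify() above:
 *
 *	static int mydrv_notify(struct ccw_device *cdev, int event)
 *	{
 *		// keep the device on path events, give it up when gone
 *		return event != CIO_GONE;
 *	}
 */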
/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects that the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}
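/*
 * Recognition flow summary (sketch, derived from the code above):
 *
 *	ccw_device_recognition()
 *	  -> DEV_STATE_SENSE_ID, ccw_device_sense_id_start()
 *	  -> ccw_device_sense_id_done(err)
 *	       err == 0      -> DEV_STATE_OFFLINE
 *	       err == -ETIME -> DEV_STATE_BOXED
 *	       otherwise     -> DEV_STATE_NOT_OPER
 */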
/*
 * Handle events for states that use the ccw request infrastructure.
 */
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
	switch (e) {
	case DEV_EVENT_NOTOPER:
		ccw_request_notoper(cdev);
		break;
	case DEV_EVENT_INTERRUPT:
		ccw_request_handler(cdev);
		break;
	case DEV_EVENT_TIMEOUT:
		ccw_request_timeout(cdev);
		break;
	default:
		break;
	}
}

static void ccw_device_report_path_events(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
			path_event[chp] |= PE_PATH_GONE;
		if (mask & cdev->private->path_new_mask & sch->vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
}

static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}

static void create_fake_irb(struct irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	if (type == FAKE_CMD_IRB) {
		struct cmd_scsw *scsw = &irb->scsw.cmd;
		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	} else if (type == FAKE_TM_IRB) {
		struct tm_scsw *scsw = &irb->scsw.tm;
		scsw->x = 1;
		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	}
}

static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;

	if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
		ccw_device_schedule_recovery();

	cdev->private->path_broken_mask = broken_paths;
}
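/*
 * Illustrative sketch (assumed driver code, not from this file): the
 * ->path_event callback invoked in ccw_device_report_path_events() above
 * receives one PE_* bitmask per CHPID position:
 *
 *	static void mydrv_path_event(struct ccw_device *cdev, int *events)
 *	{
 *		int chp;
 *
 *		for (chp = 0; chp < 8; chp++)
 *			if (events[chp] & PE_PATH_GONE)
 *				dev_warn(&cdev->dev, "path %d gone\n", chp);
 *	}
 */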
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}
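/*
 * Note (sketch of the surrounding call flow, assumed from device.c): the
 * online/offline entry points above are called with the ccwlock held, and
 * their callers then wait on cdev->private->wait_q until the FSM reaches a
 * final state via ccw_device_done().
 */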
/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
	     SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}

/*
 * Handle path verification event in boxed state.
 */
static void ccw_device_boxed_verify(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cdev->online) {
		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		else
			ccw_device_online_verify(cdev, dev_event);
	} else
		css_schedule_eval(sch->schid);
}

/*
 * Pass interrupt to device driver.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * we allow for the device action handler if:
	 * - we received ending status
	 * - the action handler requested to see all interrupts
	 * - we received an intermediate status
	 * - fast notification was requested (primary status)
	 * - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	if (ending_status)
		ccw_device_set_timeout(cdev, 0);

	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->dma_area->irb);

	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
	return 1;
}
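/*
 * Illustrative sketch (assumed driver code, not from this file): the
 * repall/fast options tested above correspond to flags a driver passes
 * to ccw_device_set_options(), e.g.:
 *
 *	ccw_device_set_options(cdev, CCWDEV_EARLY_NOTIFICATION |
 *				     CCWDEV_REPORT_ALL);
 */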
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->dma_area->irb, irb,
			       sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
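/*
 * Illustrative sketch (assumed driver code, not from this file): since
 * ccw_device_online_timeout() above and the kill-I/O paths below hand the
 * driver ERR_PTR(-ETIMEDOUT) or ERR_PTR(-EIO) instead of a real irb, an
 * interrupt handler must test for that first:
 *
 *	static void mydrv_irq(struct ccw_device *cdev, unsigned long intparm,
 *			      struct irb *irb)
 *	{
 *		if (IS_ERR(irb))
 *			return;		// request was killed or timed out
 *		// normal status handling ...
 *	}
 */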
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;
	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
	} else {
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
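/*
 * Sketch of how the table below is used (see dev_fsm_event() in device.h):
 * an event is dispatched by indexing the current state and the event,
 * roughly
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */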
/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);