aic94xx_task.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aic94xx SAS/SATA Tasks
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/spinlock.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

static void asd_unbuild_ata_ascb(struct asd_ascb *a);
static void asd_unbuild_smp_ascb(struct asd_ascb *a);
static void asd_unbuild_ssp_ascb(struct asd_ascb *a);

static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
{
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
	asd_ha->seq.can_queue += num;
	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
}

/* DMA_... to our direction translation.
 */
static const u8 data_dir_flags[] = {
	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
};

static int asd_map_scatterlist(struct sas_task *task,
			       struct sg_el *sg_arr,
			       gfp_t gfp_flags)
{
	struct asd_ascb *ascb = task->lldd_task;
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct scatterlist *sc;
	int num_sg, res;

	if (task->data_dir == DMA_NONE)
		return 0;

	if (task->num_scatter == 0) {
		void *p = task->scatter;
		dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
						task->total_xfer_len,
						task->data_dir);
		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
		return 0;
	}

	/* STP tasks come from libata which has already mapped
	 * the SG list */
	if (sas_protocol_ata(task->task_proto))
		num_sg = task->num_scatter;
	else
		num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
				    task->num_scatter, task->data_dir);
	if (num_sg == 0)
		return -ENOMEM;

	if (num_sg > 3) {
		int i;

		ascb->sg_arr = asd_alloc_coherent(asd_ha,
						  num_sg * sizeof(struct sg_el),
						  gfp_flags);
		if (!ascb->sg_arr) {
			res = -ENOMEM;
			goto err_unmap;
		}
		for_each_sg(task->scatter, sc, num_sg, i) {
			struct sg_el *sg =
				&((struct sg_el *)ascb->sg_arr->vaddr)[i];
			sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
			sg->size = cpu_to_le32((u32)sg_dma_len(sc));
			if (i == num_sg - 1)
				sg->flags |= ASD_SG_EL_LIST_EOL;
		}

		for_each_sg(task->scatter, sc, 2, i) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
		sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;

		memset(&sg_arr[2], 0, sizeof(*sg_arr));
		sg_arr[2].bus_addr = cpu_to_le64((u64)ascb->sg_arr->dma_handle);
	} else {
		int i;
		for_each_sg(task->scatter, sc, num_sg, i) {
			sg_arr[i].bus_addr =
				cpu_to_le64((u64)sg_dma_address(sc));
			sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
		}
		sg_arr[i - 1].flags |= ASD_SG_EL_LIST_EOL;
	}

	return 0;
err_unmap:
	if (sas_protocol_ata(task->task_proto))
		dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
			     task->num_scatter, task->data_dir);
	return res;
}
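
/*
 * Layout produced by asd_map_scatterlist() above: the SCB embeds room for
 * only three struct sg_el entries.  When the request fits in three or fewer
 * entries they are written directly into sg_arr[] and the last one is marked
 * ASD_SG_EL_LIST_EOL.  When more are needed, the full list goes into a
 * separately allocated coherent buffer (ascb->sg_arr); embedded entries 0
 * and 1 still hold the first two elements, entry 1 is marked
 * ASD_SG_EL_LIST_EOS with next_sg_offs pointing at entry 2, and entry 2
 * carries only the DMA address of the external list, so the sequencer can
 * continue from there.
 */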

static void asd_unmap_scatterlist(struct asd_ascb *ascb)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_task *task = ascb->uldd_task;

	if (task->data_dir == DMA_NONE)
		return;

	if (task->num_scatter == 0) {
		dma_addr_t dma = (dma_addr_t)
			le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
		dma_unmap_single(&ascb->ha->pcidev->dev, dma,
				 task->total_xfer_len, task->data_dir);
		return;
	}

	asd_free_coherent(asd_ha, ascb->sg_arr);
	if (task->task_proto != SAS_PROTOCOL_STP)
		dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
			     task->num_scatter, task->data_dir);
}

/* ---------- Task complete tasklet ---------- */

static void asd_get_response_tasklet(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct sas_task *task = ascb->uldd_task;
	struct task_status_struct *ts = &task->task_status;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

/*	int  size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
	int  edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	void *r;

	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return;
	}

	ts->buf_valid_size = 0;
	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	r = edb->vaddr;
	if (task->task_proto == SAS_PROTOCOL_SSP) {
		struct ssp_response_iu *iu =
			r + 16 + sizeof(struct ssp_frame_hdr);

		ts->residual = le32_to_cpu(*(__le32 *)r);

		sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
	} else {
		struct ata_task_resp *resp = (void *) &ts->buf[0];

		ts->residual = le32_to_cpu(*(__le32 *)r);

		if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
			resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
			memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
			ts->buf_valid_size = sizeof(*resp);
		}
	}

	asd_invalidate_edb(escb, edb_id);
}

static void asd_task_tasklet_complete(struct asd_ascb *ascb,
				      struct done_list_struct *dl)
{
	struct sas_task *task = ascb->uldd_task;
	struct task_status_struct *ts = &task->task_status;
	unsigned long flags;
	u8 opcode = dl->opcode;

	asd_can_dequeue(ascb->ha, 1);

Again:
	switch (opcode) {
	case TC_NO_ERROR:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_SAM_STAT_GOOD;
		break;
	case TC_UNDERRUN:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
		break;
	case TC_OVERRUN:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		break;
	case TC_SSP_RESP:
	case TC_ATA_RESP:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_PROTO_RESPONSE;
		asd_get_response_tasklet(ascb, dl);
		break;
	case TF_OPEN_REJECT:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		if (dl->status_block[1] & 2)
			ts->open_rej_reason = 1 + dl->status_block[2];
		else if (dl->status_block[1] & 1)
			ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
		else
			ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case TF_OPEN_TO:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_TO;
		break;
	case TF_PHY_DOWN:
	case TU_PHY_DOWN:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		break;
	case TI_PHY_DOWN:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_PHY_DOWN;
		break;
	case TI_BREAK:
	case TI_PROTO_ERR:
	case TI_NAK:
	case TI_ACK_NAK_TO:
	case TF_SMP_XMIT_RCV_ERR:
	case TC_ATA_R_ERR_RECV:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_INTERRUPTED;
		break;
	case TF_BREAK:
	case TU_BREAK:
	case TU_ACK_NAK_TO:
	case TF_SMPRSP_TO:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;
	case TF_NAK_RECV:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case TA_I_T_NEXUS_LOSS:
		opcode = dl->status_block[0];
		goto Again;
	case TF_INV_CONN_HANDLE:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEVICE_UNKNOWN;
		break;
	case TF_REQUESTED_N_PENDING:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PENDING;
		break;
	case TC_TASK_CLEARED:
	case TA_ON_REQ:
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		break;

	case TF_NO_SMP_CONN:
	case TF_TMF_NO_CTX:
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE:
	case TF_TMF_TASK_DONE:
	case TF_TMF_NO_CONN_HANDLE:
	case TF_IRTT_TO:
	case TF_IU_SHORT:
	case TF_DATA_OFFS_ERR:
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;

	case TC_LINK_ADM_RESP:
	case TC_CONTROL_PHY:
	case TC_RESUME:
	case TC_PARTIAL_SG_LIST:
	default:
		ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
		break;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		asd_unbuild_ata_ascb(ascb);
		break;
	case SAS_PROTOCOL_SMP:
		asd_unbuild_smp_ascb(ascb);
		break;
	case SAS_PROTOCOL_SSP:
		asd_unbuild_ssp_ascb(ascb);
		break;
	default:
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		struct completion *completion = ascb->completion;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
			    "stat 0x%x but aborted by upper layer!\n",
			    task, opcode, ts->resp, ts->stat);
		if (completion)
			complete(completion);
	} else {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		task->lldd_task = NULL;
		asd_ascb_free(ascb);
		mb();
		task->task_done(task);
	}
}
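
/*
 * The completion tasklet above translates the sequencer's done-list opcode
 * into a libsas (resp, stat) pair.  TA_I_T_NEXUS_LOSS is special: the real
 * completion code is carried in status_block[0], so the switch is re-entered
 * with that value.  Once the status is set, the per-protocol unbuild helpers
 * defined below undo whatever DMA mappings the matching build helpers
 * created, and the task is either handed back through task->task_done() or,
 * if the upper layer already aborted it, the waiter on ascb->completion is
 * signalled instead.
 */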

/* ---------- ATA ---------- */

static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
			      gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct scb *scb;
	u8 flags;
	int res = 0;

	scb = ascb->scb;

	if (unlikely(task->ata_task.device_control_reg_update))
		scb->header.opcode = CONTROL_ATA_DEV;
	else if (dev->sata_dev.class == ATA_DEV_ATAPI)
		scb->header.opcode = INITIATE_ATAPI_TASK;
	else
		scb->header.opcode = INITIATE_ATA_TASK;

	scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
	if (dev->port->oob_mode == SAS_OOB_MODE)
		scb->ata_task.proto_conn_rate |= dev->linkrate;

	scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
	scb->ata_task.fis = task->ata_task.fis;
	if (likely(!task->ata_task.device_control_reg_update))
		scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
	if (dev->sata_dev.class == ATA_DEV_ATAPI)
		memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
		       16);
	scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->ata_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)dev->lldd_dev);

	if (likely(!task->ata_task.device_control_reg_update)) {
		flags = 0;
		if (task->ata_task.dma_xfer)
			flags |= DATA_XFER_MODE_DMA;
		if (task->ata_task.use_ncq &&
		    dev->sata_dev.class != ATA_DEV_ATAPI)
			flags |= ATA_Q_TYPE_NCQ;
		flags |= data_dir_flags[task->data_dir];
		scb->ata_task.ata_flags = flags;

		scb->ata_task.retry_count = task->ata_task.retry_count;

		flags = 0;
		if (task->ata_task.set_affil_pol)
			flags |= SET_AFFIL_POLICY;
		if (task->ata_task.stp_affil_pol)
			flags |= STP_AFFIL_POLICY;
		scb->ata_task.flags = flags;
	}
	ascb->tasklet_complete = asd_task_tasklet_complete;

	if (likely(!task->ata_task.device_control_reg_update))
		res = asd_map_scatterlist(task, scb->ata_task.sg_element,
					  gfp_flags);

	return res;
}

static void asd_unbuild_ata_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}

/* ---------- SMP ---------- */

static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
			      gfp_t gfp_flags)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	struct domain_device *dev = task->dev;
	struct scb *scb;

	dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
		   DMA_TO_DEVICE);
	dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
		   DMA_FROM_DEVICE);

	scb = ascb->scb;

	scb->header.opcode = INITIATE_SMP_TASK;

	scb->smp_task.proto_conn_rate = dev->linkrate;

	scb->smp_task.smp_req.bus_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
	scb->smp_task.smp_req.size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);

	scb->smp_task.smp_resp.bus_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
	scb->smp_task.smp_resp.size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);

	scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->smp_task.conn_handle = cpu_to_le16((u16)
						(unsigned long)dev->lldd_dev);

	ascb->tasklet_complete = asd_task_tasklet_complete;

	return 0;
}

static void asd_unbuild_smp_ascb(struct asd_ascb *a)
{
	struct sas_task *task = a->uldd_task;

	BUG_ON(!task);
	dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
}
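
/*
 * For SMP there is no payload SG list: asd_build_smp_ascb() above maps the
 * request and response frames as one-entry scatterlists itself, and
 * asd_unbuild_smp_ascb() unmaps them unconditionally.  The 4 bytes
 * subtracted from each frame length presumably leave out the frame CRC,
 * which the hardware appends to the request and strips from the response.
 */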

/* ---------- SSP ---------- */

static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
			      gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct scb *scb;
	int res = 0;

	scb = ascb->scb;

	scb->header.opcode = INITIATE_SSP_TASK;

	scb->ssp_task.proto_conn_rate  = (1 << 4); /* SSP */
	scb->ssp_task.proto_conn_rate |= dev->linkrate;
	scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
	scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
	memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

	memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
	if (task->ssp_task.enable_first_burst)
		scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
	scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
	scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
	memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
	       task->ssp_task.cmd->cmd_len);

	scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_task.conn_handle = cpu_to_le16(
		(u16)(unsigned long)dev->lldd_dev);
	scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
	scb->ssp_task.retry_count = scb->ssp_task.retry_count;

	ascb->tasklet_complete = asd_task_tasklet_complete;

	res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);

	return res;
}

static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
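
/*
 * The SSP build above packs the COMMAND IU's efb_prio_attr byte following
 * what appears to be the SAS layout: ENABLE FIRST BURST in the EFB_MASK
 * bit, task priority in bits 6:3 (task_prio << 3) and task attribute in
 * bits 2:0 (task_attr & 7).  The target port transfer tag (tptt) is left
 * at 0xFFFF, and the connection handle comes from dev->lldd_dev, just as
 * in the ATA and SMP cases.
 */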

/* ---------- Execute Task ---------- */

static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
{
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
	if ((asd_ha->seq.can_queue - num) < 0)
		res = -SAS_QUEUE_FULL;
	else
		asd_ha->seq.can_queue -= num;
	spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);

	return res;
}

int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
	int res = 0;
	LIST_HEAD(alist);
	struct sas_task *t = task;
	struct asd_ascb *ascb = NULL, *a;
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;

	res = asd_can_queue(asd_ha, 1);
	if (res)
		return res;

	res = 1;
	ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
	if (res) {
		res = -ENOMEM;
		goto out_err;
	}

	__list_add(&alist, ascb->list.prev, &ascb->list);
	list_for_each_entry(a, &alist, list) {
		a->uldd_task = t;
		t->lldd_task = a;
		break;
	}
	list_for_each_entry(a, &alist, list) {
		t = a->uldd_task;
		a->uldd_timer = 1;
		if (t->task_proto & SAS_PROTOCOL_STP)
			t->task_proto = SAS_PROTOCOL_STP;
		switch (t->task_proto) {
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
			res = asd_build_ata_ascb(a, t, gfp_flags);
			break;
		case SAS_PROTOCOL_SMP:
			res = asd_build_smp_ascb(a, t, gfp_flags);
			break;
		case SAS_PROTOCOL_SSP:
			res = asd_build_ssp_ascb(a, t, gfp_flags);
			break;
		default:
			asd_printk("unknown sas_task proto: 0x%x\n",
				   t->task_proto);
			res = -ENOMEM;
			break;
		}
		if (res)
			goto out_err_unmap;
	}
	list_del_init(&alist);

	res = asd_post_ascb_list(asd_ha, ascb, 1);
	if (unlikely(res)) {
		a = NULL;
		__list_add(&alist, ascb->list.prev, &ascb->list);
		goto out_err_unmap;
	}

	return 0;
out_err_unmap:
	{
		struct asd_ascb *b = a;
		list_for_each_entry(a, &alist, list) {
			if (a == b)
				break;
			t = a->uldd_task;
			switch (t->task_proto) {
			case SAS_PROTOCOL_SATA:
			case SAS_PROTOCOL_STP:
				asd_unbuild_ata_ascb(a);
				break;
			case SAS_PROTOCOL_SMP:
				asd_unbuild_smp_ascb(a);
				break;
			case SAS_PROTOCOL_SSP:
				asd_unbuild_ssp_ascb(a);
				break;
			default:
				break;
			}
			t->lldd_task = NULL;
		}
	}
	list_del_init(&alist);
out_err:
	if (ascb)
		asd_ascb_free_list(ascb);
	asd_can_dequeue(asd_ha, 1);
	return res;
}
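
/*
 * asd_execute_task() is the entry point libsas uses to issue I/O to this
 * LLDD; the driver registers it as the .lldd_execute_task handler in its
 * struct sas_domain_function_template (set up in aic94xx_init.c), roughly:
 *
 *	.lldd_execute_task = asd_execute_task,
 *
 * Queue-depth accounting pairs asd_can_queue() here with asd_can_dequeue()
 * in the completion tasklet, so the can_queue slot taken at submission is
 * released either when the task completes or on any of the error paths
 * above.
 */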