dpcsup.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting. We
 *	will take a spinlock out on the queue before operating on it.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while (aac_consumer_get(dev, q, &entry))
	{
		int fast;
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);
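		/*
		 * "fast" was decoded from the low bit of entry->addr
		 * above (the remaining bits, >> 2, index dev->fibs).
		 * On a fast response the adapter reports success without
		 * returning a full status, so ST_OK and AdapterProcessed
		 * are filled in on its behalf below.
		 */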
		if (fast) {
			/*
			 *	Doctor the fib
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			} else {
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			}
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				complete(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
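			/*
			 * done == 2 means the waiter has already given up
			 * on this fib (presumably marked as such in
			 * aac_fib_send when its wait was interrupted), so
			 * nobody else will reclaim it: complete and free
			 * it here instead.
			 */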
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}


/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all the QEs there are and wake up all the waiters before
 *	exiting. We will take a spinlock out on the queue before operating
 *	on it.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling command QEs off the command queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while (aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread)
			if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

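		/*
		 * If the AIF thread is running and a real fib was
		 * allocated above, hand the command off to the thread;
		 * otherwise answer it inline with ST_OK using the
		 * on-stack fib.
		 */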
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/*
 *
 * aac_aif_callback
 * @context: the context set in the fib - here it is the AIF fib itself
 * @fibptr: pointer to the fib
 *
 * Handles the AIFs - new method (SRC)
 *
 */

static void aac_aif_callback(void *context, struct fib * fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;

	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}

	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *) fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	aac_fib_send(AifRequest,
		fibctx,
		sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
		FsaNormal,
		0, 1,
		(fib_callback)aac_aif_callback, fibctx);
}


/*
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull
 *	off all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
	int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
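	/*
	 * isAif selects the path: 1 is a legacy AIF that is copied and
	 * queued for the AIF thread, 2 is a new-style (SRC) AIF that is
	 * re-armed via aac_aif_callback, and anything else is a normal
	 * completion with index naming the fib in dev->fibs.
	 */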
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;

		/*
		 *	Allocate a FIB. For non queued stuff we can just use
		 *	the stack so we are happy. We need a fib object in
		 *	order to manage the linked lists.
		 */
		if ((!dev->aif_thread)
		    || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree(fib);
			return 1;
		}
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;

		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *) fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;

		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {

			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
				/* Doctor the fib */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}

			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected)) {
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				} else {
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				}
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);

				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		}

		if (start_callback) {
			/*
			 * NOTE: we cannot touch the fib after this
			 * call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		return 0;
	}
}