bh.c (14530B)
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers 4 * 5 * Copyright (c) 2010, ST-Ericsson 6 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> 7 * 8 * Based on: 9 * ST-Ericsson UMAC CW1200 driver, which is 10 * Copyright (c) 2010, ST-Ericsson 11 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> 12 */ 13 14#include <linux/module.h> 15#include <net/mac80211.h> 16#include <linux/kthread.h> 17#include <linux/timer.h> 18 19#include "cw1200.h" 20#include "bh.h" 21#include "hwio.h" 22#include "wsm.h" 23#include "hwbus.h" 24#include "debug.h" 25#include "fwio.h" 26 27static int cw1200_bh(void *arg); 28 29#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4) 30/* an SPI message cannot be bigger than (2"12-1)*2 bytes 31 * "*2" to cvt to bytes 32 */ 33#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2) 34#define PIGGYBACK_CTRL_REG (2) 35#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG) 36 37/* Suspend state privates */ 38enum cw1200_bh_pm_state { 39 CW1200_BH_RESUMED = 0, 40 CW1200_BH_SUSPEND, 41 CW1200_BH_SUSPENDED, 42 CW1200_BH_RESUME, 43}; 44 45static void cw1200_bh_work(struct work_struct *work) 46{ 47 struct cw1200_common *priv = 48 container_of(work, struct cw1200_common, bh_work); 49 cw1200_bh(priv); 50} 51 52int cw1200_register_bh(struct cw1200_common *priv) 53{ 54 int err = 0; 55 /* Realtime workqueue */ 56 priv->bh_workqueue = alloc_workqueue("cw1200_bh", 57 WQ_MEM_RECLAIM | WQ_HIGHPRI 58 | WQ_CPU_INTENSIVE, 1); 59 60 if (!priv->bh_workqueue) 61 return -ENOMEM; 62 63 INIT_WORK(&priv->bh_work, cw1200_bh_work); 64 65 pr_debug("[BH] register.\n"); 66 67 atomic_set(&priv->bh_rx, 0); 68 atomic_set(&priv->bh_tx, 0); 69 atomic_set(&priv->bh_term, 0); 70 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); 71 priv->bh_error = 0; 72 priv->hw_bufs_used = 0; 73 priv->buf_id_tx = 0; 74 priv->buf_id_rx = 0; 75 init_waitqueue_head(&priv->bh_wq); 76 init_waitqueue_head(&priv->bh_evt_wq); 77 78 err = !queue_work(priv->bh_workqueue, &priv->bh_work); 79 WARN_ON(err); 80 return err; 81} 82 83void cw1200_unregister_bh(struct cw1200_common *priv) 84{ 85 atomic_inc(&priv->bh_term); 86 wake_up(&priv->bh_wq); 87 88 destroy_workqueue(priv->bh_workqueue); 89 priv->bh_workqueue = NULL; 90 91 pr_debug("[BH] unregistered.\n"); 92} 93 94void cw1200_irq_handler(struct cw1200_common *priv) 95{ 96 pr_debug("[BH] irq.\n"); 97 98 /* Disable Interrupts! */ 99 /* NOTE: hwbus_ops->lock already held */ 100 __cw1200_irq_enable(priv, 0); 101 102 if (/* WARN_ON */(priv->bh_error)) 103 return; 104 105 if (atomic_inc_return(&priv->bh_rx) == 1) 106 wake_up(&priv->bh_wq); 107} 108EXPORT_SYMBOL_GPL(cw1200_irq_handler); 109 110void cw1200_bh_wakeup(struct cw1200_common *priv) 111{ 112 pr_debug("[BH] wakeup.\n"); 113 if (priv->bh_error) { 114 pr_err("[BH] wakeup failed (BH error)\n"); 115 return; 116 } 117 118 if (atomic_inc_return(&priv->bh_tx) == 1) 119 wake_up(&priv->bh_wq); 120} 121 122int cw1200_bh_suspend(struct cw1200_common *priv) 123{ 124 pr_debug("[BH] suspend.\n"); 125 if (priv->bh_error) { 126 wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n"); 127 return -EINVAL; 128 } 129 130 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND); 131 wake_up(&priv->bh_wq); 132 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || 133 (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)), 134 1 * HZ) ? 0 : -ETIMEDOUT; 135} 136 137int cw1200_bh_resume(struct cw1200_common *priv) 138{ 139 pr_debug("[BH] resume.\n"); 140 if (priv->bh_error) { 141 wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n"); 142 return -EINVAL; 143 } 144 145 atomic_set(&priv->bh_suspend, CW1200_BH_RESUME); 146 wake_up(&priv->bh_wq); 147 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || 148 (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)), 149 1 * HZ) ? 0 : -ETIMEDOUT; 150} 151 152static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv) 153{ 154 ++priv->hw_bufs_used; 155} 156 157int wsm_release_tx_buffer(struct cw1200_common *priv, int count) 158{ 159 int ret = 0; 160 int hw_bufs_used = priv->hw_bufs_used; 161 162 priv->hw_bufs_used -= count; 163 if (WARN_ON(priv->hw_bufs_used < 0)) 164 ret = -1; 165 else if (hw_bufs_used >= priv->wsm_caps.input_buffers) 166 ret = 1; 167 if (!priv->hw_bufs_used) 168 wake_up(&priv->bh_evt_wq); 169 return ret; 170} 171 172static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv, 173 u16 *ctrl_reg) 174{ 175 int ret; 176 177 ret = cw1200_reg_read_16(priv, 178 ST90TDS_CONTROL_REG_ID, ctrl_reg); 179 if (ret) { 180 ret = cw1200_reg_read_16(priv, 181 ST90TDS_CONTROL_REG_ID, ctrl_reg); 182 if (ret) 183 pr_err("[BH] Failed to read control register.\n"); 184 } 185 186 return ret; 187} 188 189static int cw1200_device_wakeup(struct cw1200_common *priv) 190{ 191 u16 ctrl_reg; 192 int ret; 193 194 pr_debug("[BH] Device wakeup.\n"); 195 196 /* First, set the dpll register */ 197 ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, 198 cw1200_dpll_from_clk(priv->hw_refclk)); 199 if (WARN_ON(ret)) 200 return ret; 201 202 /* To force the device to be always-on, the host sets WLAN_UP to 1 */ 203 ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 204 ST90TDS_CONT_WUP_BIT); 205 if (WARN_ON(ret)) 206 return ret; 207 208 ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg); 209 if (WARN_ON(ret)) 210 return ret; 211 212 /* If the device returns WLAN_RDY as 1, the device is active and will 213 * remain active. 214 */ 215 if (ctrl_reg & ST90TDS_CONT_RDY_BIT) { 216 pr_debug("[BH] Device awake.\n"); 217 return 1; 218 } 219 220 return 0; 221} 222 223/* Must be called from BH thraed. */ 224void cw1200_enable_powersave(struct cw1200_common *priv, 225 bool enable) 226{ 227 pr_debug("[BH] Powerave is %s.\n", 228 enable ? "enabled" : "disabled"); 229 priv->powersave_enabled = enable; 230} 231 232static int cw1200_bh_rx_helper(struct cw1200_common *priv, 233 uint16_t *ctrl_reg, 234 int *tx) 235{ 236 size_t read_len = 0; 237 struct sk_buff *skb_rx = NULL; 238 struct wsm_hdr *wsm; 239 size_t wsm_len; 240 u16 wsm_id; 241 u8 wsm_seq; 242 int rx_resync = 1; 243 244 size_t alloc_len; 245 u8 *data; 246 247 read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2; 248 if (!read_len) 249 return 0; /* No more work */ 250 251 if (WARN_ON((read_len < sizeof(struct wsm_hdr)) || 252 (read_len > EFFECTIVE_BUF_SIZE))) { 253 pr_debug("Invalid read len: %zu (%04x)", 254 read_len, *ctrl_reg); 255 goto err; 256 } 257 258 /* Add SIZE of PIGGYBACK reg (CONTROL Reg) 259 * to the NEXT Message length + 2 Bytes for SKB 260 */ 261 read_len = read_len + 2; 262 263 alloc_len = priv->hwbus_ops->align_size( 264 priv->hwbus_priv, read_len); 265 266 /* Check if not exceeding CW1200 capabilities */ 267 if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) { 268 pr_debug("Read aligned len: %zu\n", 269 alloc_len); 270 } 271 272 skb_rx = dev_alloc_skb(alloc_len); 273 if (WARN_ON(!skb_rx)) 274 goto err; 275 276 skb_trim(skb_rx, 0); 277 skb_put(skb_rx, read_len); 278 data = skb_rx->data; 279 if (WARN_ON(!data)) 280 goto err; 281 282 if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) { 283 pr_err("rx blew up, len %zu\n", alloc_len); 284 goto err; 285 } 286 287 /* Piggyback */ 288 *ctrl_reg = __le16_to_cpu( 289 ((__le16 *)data)[alloc_len / 2 - 1]); 290 291 wsm = (struct wsm_hdr *)data; 292 wsm_len = __le16_to_cpu(wsm->len); 293 if (WARN_ON(wsm_len > read_len)) 294 goto err; 295 296 if (priv->wsm_enable_wsm_dumps) 297 print_hex_dump_bytes("<-- ", 298 DUMP_PREFIX_NONE, 299 data, wsm_len); 300 301 wsm_id = __le16_to_cpu(wsm->id) & 0xFFF; 302 wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7; 303 304 skb_trim(skb_rx, wsm_len); 305 306 if (wsm_id == 0x0800) { 307 wsm_handle_exception(priv, 308 &data[sizeof(*wsm)], 309 wsm_len - sizeof(*wsm)); 310 goto err; 311 } else if (!rx_resync) { 312 if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) 313 goto err; 314 } 315 priv->wsm_rx_seq = (wsm_seq + 1) & 7; 316 rx_resync = 0; 317 318 if (wsm_id & 0x0400) { 319 int rc = wsm_release_tx_buffer(priv, 1); 320 if (WARN_ON(rc < 0)) 321 return rc; 322 else if (rc > 0) 323 *tx = 1; 324 } 325 326 /* cw1200_wsm_rx takes care on SKB livetime */ 327 if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx))) 328 goto err; 329 330 if (skb_rx) { 331 dev_kfree_skb(skb_rx); 332 skb_rx = NULL; 333 } 334 335 return 0; 336 337err: 338 if (skb_rx) { 339 dev_kfree_skb(skb_rx); 340 skb_rx = NULL; 341 } 342 return -1; 343} 344 345static int cw1200_bh_tx_helper(struct cw1200_common *priv, 346 int *pending_tx, 347 int *tx_burst) 348{ 349 size_t tx_len; 350 u8 *data; 351 int ret; 352 struct wsm_hdr *wsm; 353 354 if (priv->device_can_sleep) { 355 ret = cw1200_device_wakeup(priv); 356 if (WARN_ON(ret < 0)) { /* Error in wakeup */ 357 *pending_tx = 1; 358 return 0; 359 } else if (ret) { /* Woke up */ 360 priv->device_can_sleep = false; 361 } else { /* Did not awake */ 362 *pending_tx = 1; 363 return 0; 364 } 365 } 366 367 wsm_alloc_tx_buffer(priv); 368 ret = wsm_get_tx(priv, &data, &tx_len, tx_burst); 369 if (ret <= 0) { 370 wsm_release_tx_buffer(priv, 1); 371 if (WARN_ON(ret < 0)) 372 return ret; /* Error */ 373 return 0; /* No work */ 374 } 375 376 wsm = (struct wsm_hdr *)data; 377 BUG_ON(tx_len < sizeof(*wsm)); 378 BUG_ON(__le16_to_cpu(wsm->len) != tx_len); 379 380 atomic_inc(&priv->bh_tx); 381 382 tx_len = priv->hwbus_ops->align_size( 383 priv->hwbus_priv, tx_len); 384 385 /* Check if not exceeding CW1200 capabilities */ 386 if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE)) 387 pr_debug("Write aligned len: %zu\n", tx_len); 388 389 wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX)); 390 wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq)); 391 392 if (WARN_ON(cw1200_data_write(priv, data, tx_len))) { 393 pr_err("tx blew up, len %zu\n", tx_len); 394 wsm_release_tx_buffer(priv, 1); 395 return -1; /* Error */ 396 } 397 398 if (priv->wsm_enable_wsm_dumps) 399 print_hex_dump_bytes("--> ", 400 DUMP_PREFIX_NONE, 401 data, 402 __le16_to_cpu(wsm->len)); 403 404 wsm_txed(priv, data); 405 priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX; 406 407 if (*tx_burst > 1) { 408 cw1200_debug_tx_burst(priv); 409 return 1; /* Work remains */ 410 } 411 412 return 0; 413} 414 415static int cw1200_bh(void *arg) 416{ 417 struct cw1200_common *priv = arg; 418 int rx, tx, term, suspend; 419 u16 ctrl_reg = 0; 420 int tx_allowed; 421 int pending_tx = 0; 422 int tx_burst; 423 long status; 424 u32 dummy; 425 int ret; 426 427 for (;;) { 428 if (!priv->hw_bufs_used && 429 priv->powersave_enabled && 430 !priv->device_can_sleep && 431 !atomic_read(&priv->recent_scan)) { 432 status = 1 * HZ; 433 pr_debug("[BH] Device wakedown. No data.\n"); 434 cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); 435 priv->device_can_sleep = true; 436 } else if (priv->hw_bufs_used) { 437 /* Interrupt loss detection */ 438 status = 1 * HZ; 439 } else { 440 status = MAX_SCHEDULE_TIMEOUT; 441 } 442 443 /* Dummy Read for SDIO retry mechanism*/ 444 if ((priv->hw_type != -1) && 445 (atomic_read(&priv->bh_rx) == 0) && 446 (atomic_read(&priv->bh_tx) == 0)) 447 cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID, 448 &dummy, sizeof(dummy)); 449 450 pr_debug("[BH] waiting ...\n"); 451 status = wait_event_interruptible_timeout(priv->bh_wq, ({ 452 rx = atomic_xchg(&priv->bh_rx, 0); 453 tx = atomic_xchg(&priv->bh_tx, 0); 454 term = atomic_xchg(&priv->bh_term, 0); 455 suspend = pending_tx ? 456 0 : atomic_read(&priv->bh_suspend); 457 (rx || tx || term || suspend || priv->bh_error); 458 }), status); 459 460 pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n", 461 rx, tx, term, suspend, priv->bh_error, status); 462 463 /* Did an error occur? */ 464 if ((status < 0 && status != -ERESTARTSYS) || 465 term || priv->bh_error) { 466 break; 467 } 468 if (!status) { /* wait_event timed out */ 469 unsigned long timestamp = jiffies; 470 long timeout; 471 int pending = 0; 472 int i; 473 474 /* Check to see if we have any outstanding frames */ 475 if (priv->hw_bufs_used && (!rx || !tx)) { 476 wiphy_warn(priv->hw->wiphy, 477 "Missed interrupt? (%d frames outstanding)\n", 478 priv->hw_bufs_used); 479 rx = 1; 480 481 /* Get a timestamp of "oldest" frame */ 482 for (i = 0; i < 4; ++i) 483 pending += cw1200_queue_get_xmit_timestamp( 484 &priv->tx_queue[i], 485 ×tamp, 486 priv->pending_frame_id); 487 488 /* Check if frame transmission is timed out. 489 * Add an extra second with respect to possible 490 * interrupt loss. 491 */ 492 timeout = timestamp + 493 WSM_CMD_LAST_CHANCE_TIMEOUT + 494 1 * HZ - 495 jiffies; 496 497 /* And terminate BH thread if the frame is "stuck" */ 498 if (pending && timeout < 0) { 499 wiphy_warn(priv->hw->wiphy, 500 "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n", 501 priv->hw_bufs_used, pending, 502 timestamp, jiffies); 503 break; 504 } 505 } else if (!priv->device_can_sleep && 506 !atomic_read(&priv->recent_scan)) { 507 pr_debug("[BH] Device wakedown. Timeout.\n"); 508 cw1200_reg_write_16(priv, 509 ST90TDS_CONTROL_REG_ID, 0); 510 priv->device_can_sleep = true; 511 } 512 goto done; 513 } else if (suspend) { 514 pr_debug("[BH] Device suspend.\n"); 515 if (priv->powersave_enabled) { 516 pr_debug("[BH] Device wakedown. Suspend.\n"); 517 cw1200_reg_write_16(priv, 518 ST90TDS_CONTROL_REG_ID, 0); 519 priv->device_can_sleep = true; 520 } 521 522 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED); 523 wake_up(&priv->bh_evt_wq); 524 status = wait_event_interruptible(priv->bh_wq, 525 CW1200_BH_RESUME == atomic_read(&priv->bh_suspend)); 526 if (status < 0) { 527 wiphy_err(priv->hw->wiphy, 528 "Failed to wait for resume: %ld.\n", 529 status); 530 break; 531 } 532 pr_debug("[BH] Device resume.\n"); 533 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); 534 wake_up(&priv->bh_evt_wq); 535 atomic_inc(&priv->bh_rx); 536 goto done; 537 } 538 539 rx: 540 tx += pending_tx; 541 pending_tx = 0; 542 543 if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) 544 break; 545 546 /* Don't bother trying to rx unless we have data to read */ 547 if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { 548 ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); 549 if (ret < 0) 550 break; 551 /* Double up here if there's more data.. */ 552 if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { 553 ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); 554 if (ret < 0) 555 break; 556 } 557 } 558 559 tx: 560 if (tx) { 561 tx = 0; 562 563 BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers); 564 tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used; 565 tx_allowed = tx_burst > 0; 566 567 if (!tx_allowed) { 568 /* Buffers full. Ensure we process tx 569 * after we handle rx.. 570 */ 571 pending_tx = tx; 572 goto done_rx; 573 } 574 ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst); 575 if (ret < 0) 576 break; 577 if (ret > 0) /* More to transmit */ 578 tx = ret; 579 580 /* Re-read ctrl reg */ 581 if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) 582 break; 583 } 584 585 done_rx: 586 if (priv->bh_error) 587 break; 588 if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) 589 goto rx; 590 if (tx) 591 goto tx; 592 593 done: 594 /* Re-enable device interrupts */ 595 priv->hwbus_ops->lock(priv->hwbus_priv); 596 __cw1200_irq_enable(priv, 1); 597 priv->hwbus_ops->unlock(priv->hwbus_priv); 598 } 599 600 /* Explicitly disable device interrupts */ 601 priv->hwbus_ops->lock(priv->hwbus_priv); 602 __cw1200_irq_enable(priv, 0); 603 priv->hwbus_ops->unlock(priv->hwbus_priv); 604 605 if (!term) { 606 pr_err("[BH] Fatal error, exiting.\n"); 607 priv->bh_error = 1; 608 /* TODO: schedule_work(recovery) */ 609 } 610 return 0; 611}