main.c (52232B)
1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Copyright (C) 2021-2022 Intel Corporation 4 */ 5 6#include <linux/etherdevice.h> 7#include <linux/netdevice.h> 8#include <linux/ieee80211.h> 9#include <linux/rtnetlink.h> 10#include <linux/module.h> 11#include <linux/moduleparam.h> 12#include <linux/mei_cl_bus.h> 13#include <linux/rcupdate.h> 14#include <linux/debugfs.h> 15#include <linux/skbuff.h> 16#include <linux/wait.h> 17#include <linux/slab.h> 18#include <linux/mm.h> 19 20#include <net/cfg80211.h> 21 22#include "internal.h" 23#include "iwl-mei.h" 24#include "trace.h" 25#include "trace-data.h" 26#include "sap.h" 27 28MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface"); 29MODULE_LICENSE("GPL"); 30 31#define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \ 32 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65) 33 34/* 35 * Since iwlwifi calls iwlmei without any context, hold a pointer to the 36 * mei_cl_device structure here. 37 * Define a mutex that will synchronize all the flows between iwlwifi and 38 * iwlmei. 39 * Note that iwlmei can't have several instances, so it ok to have static 40 * variables here. 41 */ 42static struct mei_cl_device *iwl_mei_global_cldev; 43static DEFINE_MUTEX(iwl_mei_mutex); 44static unsigned long iwl_mei_status; 45 46enum iwl_mei_status_bits { 47 IWL_MEI_STATUS_SAP_CONNECTED, 48}; 49 50bool iwl_mei_is_connected(void) 51{ 52 return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); 53} 54EXPORT_SYMBOL_GPL(iwl_mei_is_connected); 55 56#define SAP_VERSION 3 57#define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! 
in ASCII */ 58 59struct iwl_sap_q_ctrl_blk { 60 __le32 wr_ptr; 61 __le32 rd_ptr; 62 __le32 size; 63}; 64 65enum iwl_sap_q_idx { 66 SAP_QUEUE_IDX_NOTIF = 0, 67 SAP_QUEUE_IDX_DATA, 68 SAP_QUEUE_IDX_MAX, 69}; 70 71struct iwl_sap_dir { 72 __le32 reserved; 73 struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX]; 74}; 75 76enum iwl_sap_dir_idx { 77 SAP_DIRECTION_HOST_TO_ME = 0, 78 SAP_DIRECTION_ME_TO_HOST, 79 SAP_DIRECTION_MAX, 80}; 81 82struct iwl_sap_shared_mem_ctrl_blk { 83 __le32 sap_id; 84 __le32 size; 85 struct iwl_sap_dir dir[SAP_DIRECTION_MAX]; 86}; 87 88/* 89 * The shared area has the following layout: 90 * 91 * +-----------------------------------+ 92 * |struct iwl_sap_shared_mem_ctrl_blk | 93 * +-----------------------------------+ 94 * |Host -> ME data queue | 95 * +-----------------------------------+ 96 * |Host -> ME notif queue | 97 * +-----------------------------------+ 98 * |ME -> Host data queue | 99 * +-----------------------------------+ 100 * |ME -> host notif queue | 101 * +-----------------------------------+ 102 * |SAP control block id (SAP!) 
| 103 * +-----------------------------------+ 104 */ 105 106#define SAP_H2M_DATA_Q_SZ 48256 107#define SAP_M2H_DATA_Q_SZ 24128 108#define SAP_H2M_NOTIF_Q_SZ 2240 109#define SAP_M2H_NOTIF_Q_SZ 62720 110 111#define _IWL_MEI_SAP_SHARED_MEM_SZ \ 112 (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ 113 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \ 114 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) 115 116#define IWL_MEI_SAP_SHARED_MEM_SZ \ 117 (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE)) 118 119struct iwl_mei_shared_mem_ptrs { 120 struct iwl_sap_shared_mem_ctrl_blk *ctrl; 121 void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX]; 122 size_t q_size[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX]; 123}; 124 125struct iwl_mei_filters { 126 struct rcu_head rcu_head; 127 struct iwl_sap_oob_filters filters; 128}; 129 130/** 131 * struct iwl_mei - holds the private date for iwl_mei 132 * 133 * @get_nvm_wq: the wait queue for the get_nvm flow 134 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA 135 * message. Used so that we can send CHECK_SHARED_AREA from atomic 136 * contexts. 137 * @get_ownership_wq: the wait queue for the get_ownership_flow 138 * @shared_mem: the memory that is shared between CSME and the host 139 * @cldev: the pointer to the MEI client device 140 * @nvm: the data returned by the CSME for the NVM 141 * @filters: the filters sent by CSME 142 * @got_ownership: true if we own the device 143 * @amt_enabled: true if CSME has wireless enabled 144 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI 145 * bus, but rather need to wait until send_csa_msg_wk runs 146 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember 147 * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down 148 * flow. 
149 * @link_prot_state: true when we are in link protection PASSIVE 150 * @csa_throttle_end_wk: used when &csa_throttled is true 151 * @data_q_lock: protects the access to the data queues which are 152 * accessed without the mutex. 153 * @sap_seq_no: the sequence number for the SAP messages 154 * @seq_no: the sequence number for the SAP messages 155 * @dbgfs_dir: the debugfs dir entry 156 */ 157struct iwl_mei { 158 wait_queue_head_t get_nvm_wq; 159 struct work_struct send_csa_msg_wk; 160 wait_queue_head_t get_ownership_wq; 161 struct iwl_mei_shared_mem_ptrs shared_mem; 162 struct mei_cl_device *cldev; 163 struct iwl_mei_nvm *nvm; 164 struct iwl_mei_filters __rcu *filters; 165 bool got_ownership; 166 bool amt_enabled; 167 bool csa_throttled; 168 bool csme_taking_ownership; 169 bool link_prot_state; 170 struct delayed_work csa_throttle_end_wk; 171 spinlock_t data_q_lock; 172 173 atomic_t sap_seq_no; 174 atomic_t seq_no; 175 176 struct dentry *dbgfs_dir; 177}; 178 179/** 180 * struct iwl_mei_cache - cache for the parameters from iwlwifi 181 * @ops: Callbacks to iwlwifi. 182 * @netdev: The netdev that will be used to transmit / receive packets. 183 * @conn_info: The connection info message triggered by iwlwifi's association. 184 * @power_limit: pointer to an array of 10 elements (le16) represents the power 185 * restrictions per chain. 186 * @rf_kill: rf kill state. 187 * @mcc: MCC info 188 * @mac_address: interface MAC address. 189 * @nvm_address: NVM MAC address. 190 * @priv: A pointer to iwlwifi. 191 * 192 * This used to cache the configurations coming from iwlwifi's way. The data 193 * is cached here so that we can buffer the configuration even if we don't have 194 * a bind from the mei bus and hence, on iwl_mei structure. 
195 */ 196struct iwl_mei_cache { 197 const struct iwl_mei_ops *ops; 198 struct net_device __rcu *netdev; 199 const struct iwl_sap_notif_connection_info *conn_info; 200 const __le16 *power_limit; 201 u32 rf_kill; 202 u16 mcc; 203 u8 mac_address[6]; 204 u8 nvm_address[6]; 205 void *priv; 206}; 207 208static struct iwl_mei_cache iwl_mei_cache = { 209 .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED 210}; 211 212static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) 213{ 214 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 215 216 if (mei_cldev_dma_unmap(cldev)) 217 dev_err(&cldev->dev, "Couldn't unmap the shared mem properly\n"); 218 memset(&mei->shared_mem, 0, sizeof(mei->shared_mem)); 219} 220 221#define HBM_DMA_BUF_ID_WLAN 1 222 223static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) 224{ 225 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 226 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; 227 228 mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, 229 IWL_MEI_SAP_SHARED_MEM_SZ); 230 231 if (IS_ERR(mem->ctrl)) { 232 int ret = PTR_ERR(mem->ctrl); 233 234 mem->ctrl = NULL; 235 236 return ret; 237 } 238 239 memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); 240 241 return 0; 242} 243 244static void iwl_mei_init_shared_mem(struct iwl_mei *mei) 245{ 246 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; 247 struct iwl_sap_dir *h2m; 248 struct iwl_sap_dir *m2h; 249 int dir, queue; 250 u8 *q_head; 251 252 mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID); 253 254 mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl)); 255 256 h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 257 m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; 258 259 h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = 260 cpu_to_le32(SAP_H2M_DATA_Q_SZ); 261 h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = 262 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); 263 m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = 264 cpu_to_le32(SAP_M2H_DATA_Q_SZ); 265 
m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = 266 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ); 267 268 /* q_head points to the start of the first queue */ 269 q_head = (void *)(mem->ctrl + 1); 270 271 /* Initialize the queue heads */ 272 for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) { 273 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) { 274 mem->q_head[dir][queue] = q_head; 275 q_head += 276 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); 277 mem->q_size[dir][queue] = 278 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); 279 } 280 } 281 282 *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID); 283} 284 285static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev, 286 struct iwl_sap_q_ctrl_blk *notif_q, 287 u8 *q_head, 288 const struct iwl_sap_hdr *hdr, 289 u32 q_sz) 290{ 291 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); 292 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); 293 size_t room_in_buf; 294 size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len); 295 296 if (rd > q_sz || wr > q_sz) { 297 dev_err(&cldev->dev, 298 "Pointers are past the end of the buffer\n"); 299 return -EINVAL; 300 } 301 302 room_in_buf = wr >= rd ? 
q_sz - wr + rd : rd - wr; 303 304 /* we don't have enough room for the data to write */ 305 if (room_in_buf < tx_sz) { 306 dev_err(&cldev->dev, 307 "Not enough room in the buffer\n"); 308 return -ENOSPC; 309 } 310 311 if (wr + tx_sz <= q_sz) { 312 memcpy(q_head + wr, hdr, tx_sz); 313 } else { 314 memcpy(q_head + wr, hdr, q_sz - wr); 315 memcpy(q_head, (const u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr)); 316 } 317 318 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); 319 return 0; 320} 321 322static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei) 323{ 324 struct iwl_sap_q_ctrl_blk *notif_q; 325 struct iwl_sap_dir *dir; 326 327 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 328 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; 329 330 if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr)) 331 return true; 332 333 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; 334 return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr); 335} 336 337static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev) 338{ 339 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 340 struct iwl_sap_me_msg_start msg = { 341 .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA), 342 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), 343 }; 344 int ret; 345 346 lockdep_assert_held(&iwl_mei_mutex); 347 348 if (mei->csa_throttled) 349 return 0; 350 351 trace_iwlmei_me_msg(&msg.hdr, true); 352 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); 353 if (ret != sizeof(msg)) { 354 dev_err(&cldev->dev, 355 "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n", 356 ret); 357 return ret; 358 } 359 360 mei->csa_throttled = true; 361 362 schedule_delayed_work(&mei->csa_throttle_end_wk, 363 msecs_to_jiffies(100)); 364 365 return 0; 366} 367 368static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk) 369{ 370 struct iwl_mei *mei = 371 container_of(wk, struct iwl_mei, csa_throttle_end_wk.work); 372 373 
mutex_lock(&iwl_mei_mutex); 374 375 mei->csa_throttled = false; 376 377 if (iwl_mei_host_to_me_data_pending(mei)) 378 iwl_mei_send_check_shared_area(mei->cldev); 379 380 mutex_unlock(&iwl_mei_mutex); 381} 382 383static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev, 384 struct iwl_sap_hdr *hdr) 385{ 386 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 387 struct iwl_sap_q_ctrl_blk *notif_q; 388 struct iwl_sap_dir *dir; 389 void *q_head; 390 u32 q_sz; 391 int ret; 392 393 lockdep_assert_held(&iwl_mei_mutex); 394 395 if (!mei->shared_mem.ctrl) { 396 dev_err(&cldev->dev, 397 "No shared memory, can't send any SAP message\n"); 398 return -EINVAL; 399 } 400 401 if (!iwl_mei_is_connected()) { 402 dev_err(&cldev->dev, 403 "Can't send a SAP message if we're not connected\n"); 404 return -ENODEV; 405 } 406 407 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); 408 dev_dbg(&cldev->dev, "Sending %d\n", hdr->type); 409 410 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 411 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; 412 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF]; 413 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF]; 414 ret = iwl_mei_write_cyclic_buf(q_head, notif_q, q_head, hdr, q_sz); 415 416 if (ret < 0) 417 return ret; 418 419 trace_iwlmei_sap_cmd(hdr, true); 420 421 return iwl_mei_send_check_shared_area(cldev); 422} 423 424void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx) 425{ 426 struct iwl_sap_q_ctrl_blk *notif_q; 427 struct iwl_sap_dir *dir; 428 struct iwl_mei *mei; 429 size_t room_in_buf; 430 size_t tx_sz; 431 size_t hdr_sz; 432 u32 q_sz; 433 u32 rd; 434 u32 wr; 435 u8 *q_head; 436 437 if (!iwl_mei_global_cldev) 438 return; 439 440 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 441 442 /* 443 * We access this path for Rx packets (the more common case) 444 * and from Tx path when we send DHCP packets, the latter is 445 * very unlikely. 
446 * Take the lock already here to make sure we see that remove() 447 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit. 448 */ 449 spin_lock_bh(&mei->data_q_lock); 450 451 if (!iwl_mei_is_connected()) { 452 spin_unlock_bh(&mei->data_q_lock); 453 return; 454 } 455 456 /* 457 * We are in a RCU critical section and the remove from the CSME bus 458 * which would free this memory waits for the readers to complete (this 459 * is done in netdev_rx_handler_unregister). 460 */ 461 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 462 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; 463 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; 464 q_sz = mei->shared_mem.q_size[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; 465 466 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); 467 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); 468 hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) : 469 sizeof(struct iwl_sap_hdr); 470 tx_sz = skb->len + hdr_sz; 471 472 if (rd > q_sz || wr > q_sz) { 473 dev_err(&mei->cldev->dev, 474 "can't write the data: pointers are past the end of the buffer\n"); 475 goto out; 476 } 477 478 room_in_buf = wr >= rd ? 
q_sz - wr + rd : rd - wr; 479 480 /* we don't have enough room for the data to write */ 481 if (room_in_buf < tx_sz) { 482 dev_err(&mei->cldev->dev, 483 "Not enough room in the buffer for this data\n"); 484 goto out; 485 } 486 487 if (skb_headroom(skb) < hdr_sz) { 488 dev_err(&mei->cldev->dev, 489 "Not enough headroom in the skb to write the SAP header\n"); 490 goto out; 491 } 492 493 if (cb_tx) { 494 struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr)); 495 496 memset(cb_hdr, 0, sizeof(*cb_hdr)); 497 cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET); 498 cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr)); 499 cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); 500 cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX)); 501 cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr)); 502 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP); 503 } else { 504 struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr)); 505 506 hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET); 507 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); 508 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); 509 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR); 510 } 511 512 if (wr + tx_sz <= q_sz) { 513 skb_copy_bits(skb, 0, q_head + wr, tx_sz); 514 } else { 515 skb_copy_bits(skb, 0, q_head + wr, q_sz - wr); 516 skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr)); 517 } 518 519 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); 520 521out: 522 spin_unlock_bh(&mei->data_q_lock); 523} 524 525static int 526iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type) 527{ 528 struct iwl_sap_hdr msg = { 529 .type = cpu_to_le16(type), 530 }; 531 532 return iwl_mei_send_sap_msg_payload(cldev, &msg); 533} 534 535static void iwl_mei_send_csa_msg_wk(struct work_struct *wk) 536{ 537 struct iwl_mei *mei = 538 container_of(wk, struct iwl_mei, send_csa_msg_wk); 539 540 if (!iwl_mei_is_connected()) 541 return; 542 543 
mutex_lock(&iwl_mei_mutex); 544 545 iwl_mei_send_check_shared_area(mei->cldev); 546 547 mutex_unlock(&iwl_mei_mutex); 548} 549 550/* Called in a RCU read critical section from netif_receive_skb */ 551static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb) 552{ 553 struct sk_buff *skb = *pskb; 554 struct iwl_mei *mei = 555 rcu_dereference(skb->dev->rx_handler_data); 556 struct iwl_mei_filters *filters = rcu_dereference(mei->filters); 557 bool rx_for_csme = false; 558 rx_handler_result_t res; 559 560 /* 561 * remove() unregisters this handler and synchronize_net, so this 562 * should never happen. 563 */ 564 if (!iwl_mei_is_connected()) { 565 dev_err(&mei->cldev->dev, 566 "Got an Rx packet, but we're not connected to SAP?\n"); 567 return RX_HANDLER_PASS; 568 } 569 570 if (filters) 571 res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme); 572 else 573 res = RX_HANDLER_PASS; 574 575 /* 576 * The data is already on the ring of the shared area, all we 577 * need to do is to tell the CSME firmware to check what we have 578 * there. 
579 */ 580 if (rx_for_csme) 581 schedule_work(&mei->send_csa_msg_wk); 582 583 if (res != RX_HANDLER_PASS) { 584 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR); 585 dev_kfree_skb(skb); 586 } 587 588 return res; 589} 590 591static void 592iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev, 593 const struct iwl_sap_me_msg_start_ok *rsp, 594 ssize_t len) 595{ 596 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 597 598 if (len != sizeof(*rsp)) { 599 dev_err(&cldev->dev, 600 "got invalid SAP_ME_MSG_START_OK from CSME firmware\n"); 601 dev_err(&cldev->dev, 602 "size is incorrect: %zd instead of %zu\n", 603 len, sizeof(*rsp)); 604 return; 605 } 606 607 if (rsp->supported_version != SAP_VERSION) { 608 dev_err(&cldev->dev, 609 "didn't get the expected version: got %d\n", 610 rsp->supported_version); 611 return; 612 } 613 614 mutex_lock(&iwl_mei_mutex); 615 set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); 616 /* wifi driver has registered already */ 617 if (iwl_mei_cache.ops) { 618 iwl_mei_send_sap_msg(mei->cldev, 619 SAP_MSG_NOTIF_WIFIDR_UP); 620 iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv); 621 } 622 623 mutex_unlock(&iwl_mei_mutex); 624} 625 626static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev, 627 const struct iwl_sap_csme_filters *filters) 628{ 629 struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 630 struct iwl_mei_filters *new_filters; 631 struct iwl_mei_filters *old_filters; 632 633 old_filters = 634 rcu_dereference_protected(mei->filters, 635 lockdep_is_held(&iwl_mei_mutex)); 636 637 new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL); 638 if (!new_filters) 639 return; 640 641 /* Copy the OOB filters */ 642 new_filters->filters = filters->filters; 643 644 rcu_assign_pointer(mei->filters, new_filters); 645 646 if (old_filters) 647 kfree_rcu(old_filters, rcu_head); 648} 649 650static void 651iwl_mei_handle_conn_status(struct mei_cl_device *cldev, 652 const struct iwl_sap_notif_conn_status 
*status) 653{ 654 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 655 struct iwl_mei_conn_info conn_info = { 656 .lp_state = le32_to_cpu(status->link_prot_state), 657 .ssid_len = le32_to_cpu(status->conn_info.ssid_len), 658 .channel = status->conn_info.channel, 659 .band = status->conn_info.band, 660 .auth_mode = le32_to_cpu(status->conn_info.auth_mode), 661 .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher), 662 }; 663 664 if (!iwl_mei_cache.ops || 665 conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid)) 666 return; 667 668 memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len); 669 ether_addr_copy(conn_info.bssid, status->conn_info.bssid); 670 671 iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info); 672 673 mei->link_prot_state = status->link_prot_state; 674 675 /* 676 * Update the Rfkill state in case the host does not own the device: 677 * if we are in Link Protection, ask to not touch the device, else, 678 * unblock rfkill. 679 * If the host owns the device, inform the user space whether it can 680 * roam. 
681 */ 682 if (mei->got_ownership) 683 iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv, 684 status->link_prot_state); 685 else 686 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, 687 status->link_prot_state); 688} 689 690static void iwl_mei_set_init_conf(struct iwl_mei *mei) 691{ 692 struct iwl_sap_notif_host_link_up link_msg = { 693 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), 694 .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)), 695 }; 696 struct iwl_sap_notif_country_code mcc_msg = { 697 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), 698 .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)), 699 .mcc = cpu_to_le16(iwl_mei_cache.mcc), 700 }; 701 struct iwl_sap_notif_sar_limits sar_msg = { 702 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), 703 .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)), 704 }; 705 struct iwl_sap_notif_host_nic_info nic_info_msg = { 706 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), 707 .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)), 708 }; 709 struct iwl_sap_msg_dw rfkill_msg = { 710 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), 711 .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)), 712 .val = cpu_to_le32(iwl_mei_cache.rf_kill), 713 }; 714 715 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC); 716 717 if (iwl_mei_cache.conn_info) { 718 link_msg.conn_info = *iwl_mei_cache.conn_info; 719 iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr); 720 } 721 722 iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr); 723 724 if (iwl_mei_cache.power_limit) { 725 memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit, 726 sizeof(sar_msg.sar_chain_info_table)); 727 iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr); 728 } 729 730 ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address); 731 ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address); 732 iwl_mei_send_sap_msg_payload(mei->cldev, 
&nic_info_msg.hdr); 733 734 iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr); 735} 736 737static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev, 738 const struct iwl_sap_msg_dw *dw) 739{ 740 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 741 struct net_device *netdev; 742 743 /* 744 * First take rtnl and only then the mutex to avoid an ABBA 745 * with iwl_mei_set_netdev() 746 */ 747 rtnl_lock(); 748 mutex_lock(&iwl_mei_mutex); 749 750 netdev = rcu_dereference_protected(iwl_mei_cache.netdev, 751 lockdep_is_held(&iwl_mei_mutex)); 752 753 if (mei->amt_enabled == !!le32_to_cpu(dw->val)) 754 goto out; 755 756 mei->amt_enabled = dw->val; 757 758 if (mei->amt_enabled) { 759 if (netdev) 760 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); 761 762 iwl_mei_set_init_conf(mei); 763 } else { 764 if (iwl_mei_cache.ops) 765 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); 766 if (netdev) 767 netdev_rx_handler_unregister(netdev); 768 } 769 770out: 771 mutex_unlock(&iwl_mei_mutex); 772 rtnl_unlock(); 773} 774 775static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev, 776 const struct iwl_sap_msg_dw *dw) 777{ 778 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 779 780 mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME); 781} 782 783static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev, 784 const void *payload) 785{ 786 /* We can get ownership and driver is registered, go ahead */ 787 if (iwl_mei_cache.ops) 788 iwl_mei_send_sap_msg(cldev, 789 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); 790} 791 792static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev, 793 const void *payload) 794{ 795 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 796 797 dev_info(&cldev->dev, "CSME takes ownership\n"); 798 799 mei->got_ownership = false; 800 801 /* 802 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver 803 * is finished taking the device down. 
804 */ 805 mei->csme_taking_ownership = true; 806 807 if (iwl_mei_cache.ops) 808 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true); 809} 810 811static void iwl_mei_handle_nvm(struct mei_cl_device *cldev, 812 const struct iwl_sap_nvm *sap_nvm) 813{ 814 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 815 const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm; 816 int i; 817 818 kfree(mei->nvm); 819 mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL); 820 if (!mei->nvm) 821 return; 822 823 ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr); 824 mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs; 825 mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg); 826 mei->nvm->caps = le32_to_cpu(sap_nvm->caps); 827 mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version); 828 829 for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++) 830 mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]); 831 832 wake_up_all(&mei->get_nvm_wq); 833} 834 835static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev, 836 const struct iwl_sap_msg_dw *dw) 837{ 838 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 839 840 /* 841 * This means that we can't use the wifi device right now, CSME is not 842 * ready to let us use it. 
843 */ 844 if (!dw->val) { 845 dev_info(&cldev->dev, "Ownership req denied\n"); 846 return; 847 } 848 849 mei->got_ownership = true; 850 wake_up_all(&mei->get_ownership_wq); 851 852 iwl_mei_send_sap_msg(cldev, 853 SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED); 854 855 /* We can now start the connection, unblock rfkill */ 856 if (iwl_mei_cache.ops) 857 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); 858} 859 860static void iwl_mei_handle_ping(struct mei_cl_device *cldev, 861 const struct iwl_sap_hdr *hdr) 862{ 863 iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG); 864} 865 866static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev, 867 const struct iwl_sap_hdr *hdr) 868{ 869 u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr); 870 u16 type = le16_to_cpu(hdr->type); 871 872 dev_dbg(&cldev->dev, 873 "Got a new SAP message: type %d, len %d, seq %d\n", 874 le16_to_cpu(hdr->type), len, 875 le32_to_cpu(hdr->seq_num)); 876 877#define SAP_MSG_HANDLER(_cmd, _handler, _sz) \ 878 case SAP_MSG_NOTIF_ ## _cmd: \ 879 if (len < _sz) { \ 880 dev_err(&cldev->dev, \ 881 "Bad size for %d: %u < %u\n", \ 882 le16_to_cpu(hdr->type), \ 883 (unsigned int)len, \ 884 (unsigned int)_sz); \ 885 break; \ 886 } \ 887 mutex_lock(&iwl_mei_mutex); \ 888 _handler(cldev, (const void *)hdr); \ 889 mutex_unlock(&iwl_mei_mutex); \ 890 break 891 892#define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \ 893 case SAP_MSG_NOTIF_ ## _cmd: \ 894 if (len < _sz) { \ 895 dev_err(&cldev->dev, \ 896 "Bad size for %d: %u < %u\n", \ 897 le16_to_cpu(hdr->type), \ 898 (unsigned int)len, \ 899 (unsigned int)_sz); \ 900 break; \ 901 } \ 902 _handler(cldev, (const void *)hdr); \ 903 break 904 905#define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \ 906 case SAP_MSG_NOTIF_ ## _cmd: \ 907 if (len < _sz) { \ 908 dev_err(&cldev->dev, \ 909 "Bad size for %d: %u < %u\n", \ 910 le16_to_cpu(hdr->type), \ 911 (unsigned int)len, \ 912 (unsigned int)_sz); \ 913 break; \ 914 } \ 915 break 916 917 switch (type) { 918 SAP_MSG_HANDLER(PING, 
iwl_mei_handle_ping, 0); 919 SAP_MSG_HANDLER(CSME_FILTERS, 920 iwl_mei_handle_csme_filters, 921 sizeof(struct iwl_sap_csme_filters)); 922 SAP_MSG_HANDLER(CSME_CONN_STATUS, 923 iwl_mei_handle_conn_status, 924 sizeof(struct iwl_sap_notif_conn_status)); 925 SAP_MSG_HANDLER_NO_LOCK(AMT_STATE, 926 iwl_mei_handle_amt_state, 927 sizeof(struct iwl_sap_msg_dw)); 928 SAP_MSG_HANDLER_NO_HANDLER(PONG, 0); 929 SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm, 930 sizeof(struct iwl_sap_nvm)); 931 SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ, 932 iwl_mei_handle_rx_host_own_req, 933 sizeof(struct iwl_sap_msg_dw)); 934 SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner, 935 sizeof(struct iwl_sap_msg_dw)); 936 SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP, 937 iwl_mei_handle_can_release_ownership, 0); 938 SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP, 939 iwl_mei_handle_csme_taking_ownership, 0); 940 default: 941 /* 942 * This is not really an error, there are message that we decided 943 * to ignore, yet, it is useful to be able to leave a note if debug 944 * is enabled. 
945 */ 946 dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n", 947 le16_to_cpu(hdr->type), len); 948 } 949 950#undef SAP_MSG_HANDLER 951#undef SAP_MSG_HANDLER_NO_LOCK 952} 953 954static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz, 955 u32 *_rd, u32 wr, 956 void *_buf, u32 len) 957{ 958 u8 *buf = _buf; 959 u32 rd = *_rd; 960 961 if (rd + len <= q_sz) { 962 memcpy(buf, q_head + rd, len); 963 rd += len; 964 } else { 965 memcpy(buf, q_head + rd, q_sz - rd); 966 memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd)); 967 rd = len - (q_sz - rd); 968 } 969 970 *_rd = rd; 971} 972 973#define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \ 974 IEEE80211_TKIP_IV_LEN + \ 975 sizeof(rfc1042_header) + ETH_TLEN) 976 977static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev, 978 const u8 *q_head, u32 q_sz, 979 u32 rd, u32 wr, ssize_t valid_rx_sz, 980 struct sk_buff_head *tx_skbs) 981{ 982 struct iwl_sap_hdr hdr; 983 struct net_device *netdev = 984 rcu_dereference_protected(iwl_mei_cache.netdev, 985 lockdep_is_held(&iwl_mei_mutex)); 986 987 if (!netdev) 988 return; 989 990 while (valid_rx_sz >= sizeof(hdr)) { 991 struct ethhdr *ethhdr; 992 unsigned char *data; 993 struct sk_buff *skb; 994 u16 len; 995 996 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr)); 997 valid_rx_sz -= sizeof(hdr); 998 len = le16_to_cpu(hdr.len); 999 1000 if (valid_rx_sz < len) { 1001 dev_err(&cldev->dev, 1002 "Data queue is corrupted: valid data len %zd, len %d\n", 1003 valid_rx_sz, len); 1004 break; 1005 } 1006 1007 if (len < sizeof(*ethhdr)) { 1008 dev_err(&cldev->dev, 1009 "Data len is smaller than an ethernet header? 
len = %d\n", 1010 len); 1011 } 1012 1013 valid_rx_sz -= len; 1014 1015 if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) { 1016 dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n", 1017 le16_to_cpu(hdr.type), len); 1018 continue; 1019 } 1020 1021 /* We need enough room for the WiFi header + SNAP + IV */ 1022 skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN); 1023 if (!skb) 1024 continue; 1025 1026 skb_reserve(skb, QOS_HDR_IV_SNAP_LEN); 1027 ethhdr = skb_push(skb, sizeof(*ethhdr)); 1028 1029 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, 1030 ethhdr, sizeof(*ethhdr)); 1031 len -= sizeof(*ethhdr); 1032 1033 skb_reset_mac_header(skb); 1034 skb_reset_network_header(skb); 1035 skb->protocol = ethhdr->h_proto; 1036 1037 data = skb_put(skb, len); 1038 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len); 1039 1040 /* 1041 * Enqueue the skb here so that it can be sent later when we 1042 * do not hold the mutex. TX'ing a packet with a mutex held is 1043 * possible, but it wouldn't be nice to forbid the TX path to 1044 * call any of iwlmei's functions, since every API from iwlmei 1045 * needs the mutex. 1046 */ 1047 __skb_queue_tail(tx_skbs, skb); 1048 } 1049} 1050 1051static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev, 1052 const u8 *q_head, u32 q_sz, 1053 u32 rd, u32 wr, ssize_t valid_rx_sz) 1054{ 1055 struct page *p = alloc_page(GFP_KERNEL); 1056 struct iwl_sap_hdr *hdr; 1057 1058 if (!p) 1059 return; 1060 1061 hdr = page_address(p); 1062 1063 while (valid_rx_sz >= sizeof(*hdr)) { 1064 u16 len; 1065 1066 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr)); 1067 valid_rx_sz -= sizeof(*hdr); 1068 len = le16_to_cpu(hdr->len); 1069 1070 if (valid_rx_sz < len) 1071 break; 1072 1073 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len); 1074 1075 trace_iwlmei_sap_cmd(hdr, false); 1076 iwl_mei_handle_sap_msg(cldev, hdr); 1077 valid_rx_sz -= len; 1078 } 1079 1080 /* valid_rx_sz must be 0 now... 
 */
	if (valid_rx_sz)
		dev_err(&cldev->dev,
			"More data in the buffer although we read it all\n");

	__free_page(p);
}

/*
 * iwl_mei_handle_sap_rx - process one ME -> host SAP queue.
 *
 * Reads the rd/wr offsets from the shared control block, validates them
 * against the queue size, and dispatches the content either to the data
 * path (@skbs non-NULL) or to the command handler (@skbs NULL). On return
 * the read pointer is published back so CSME can reuse the room.
 */
static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev,
				  struct iwl_sap_q_ctrl_blk *notif_q,
				  const u8 *q_head,
				  struct sk_buff_head *skbs,
				  u32 q_sz)
{
	/* READ_ONCE: these offsets live in memory shared with the CSME firmware */
	u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr));
	u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr));
	ssize_t valid_rx_sz;

	if (rd > q_sz || wr > q_sz) {
		dev_err(&cldev->dev,
			"Pointers are past the buffer limit\n");
		return;
	}

	if (rd == wr)
		return;

	/* Account for wrap-around of the circular buffer */
	valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr;

	if (skbs)
		iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr,
					valid_rx_sz, skbs);
	else
		iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr,
					  valid_rx_sz);

	/* Increment the read pointer to point to the write pointer */
	WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr));
}

/*
 * iwl_mei_handle_check_shared_area - handle SAP_ME_MSG_CHECK_SHARED_AREA.
 *
 * Drains both ME -> host queues: first the notification queue (without the
 * mutex — each message handler takes it itself), then, under the mutex,
 * the data queue. Data frames are only queued here and transmitted after
 * the mutex is dropped, under the RCU read lock so the netdev the skbs
 * point to stays valid.
 */
static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_q_ctrl_blk *notif_q;
	struct sk_buff_head tx_skbs;
	struct iwl_sap_dir *dir;
	void *q_head;
	u32 q_sz;

	if (!mei->shared_mem.ctrl)
		return;

	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF];

	/*
	 * Do not hold the mutex here, but rather each and every message
	 * handler takes it.
	 * This allows message handlers to take it at a certain time.
	 */
	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL, q_sz);

	mutex_lock(&iwl_mei_mutex);
	dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST];
	notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA];
	q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];
	q_sz = mei->shared_mem.q_size[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA];

	__skb_queue_head_init(&tx_skbs);

	iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs, q_sz);

	if (skb_queue_empty(&tx_skbs)) {
		mutex_unlock(&iwl_mei_mutex);
		return;
	}

	/*
	 * Take the RCU read lock before we unlock the mutex to make sure that
	 * even if the netdev is replaced by another non-NULL netdev right after
	 * we unlock the mutex, the old netdev will still be valid when we
	 * transmit the frames. We can't allow to replace the netdev here because
	 * the skbs hold a pointer to the netdev.
	 */
	rcu_read_lock();

	mutex_unlock(&iwl_mei_mutex);

	if (!rcu_access_pointer(iwl_mei_cache.netdev)) {
		dev_err(&cldev->dev, "Can't Tx without a netdev\n");
		skb_queue_purge(&tx_skbs);
		goto out;
	}

	while (!skb_queue_empty(&tx_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&tx_skbs);

		trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR);
		dev_queue_xmit(skb);
	}

out:
	rcu_read_unlock();
}

/*
 * iwl_mei_rx - Rx callback registered on the mei client device.
 *
 * Receives a SAP "ME message" into a stack buffer and dispatches it by
 * type. NOTE(review): a short read (< sizeof(*hdr)) would leave hdr->type
 * partly uninitialized — presumably the firmware never sends one; confirm.
 */
static void iwl_mei_rx(struct mei_cl_device *cldev)
{
	struct iwl_sap_me_msg_hdr *hdr;
	u8 msg[100];
	ssize_t ret;

	ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg));
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to receive data: %zd\n", ret);
		return;
	}

	if (ret == 0) {
		dev_err(&cldev->dev, "got an empty response\n");
		return;
	}

	hdr = (void *)msg;
	trace_iwlmei_me_msg(hdr, false);

	switch (le32_to_cpu(hdr->type)) {
	case SAP_ME_MSG_START_OK:
		/* The stack buffer must be able to hold the full answer */
		BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) >
			     sizeof(msg));

		iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret);
		break;
	case SAP_ME_MSG_CHECK_SHARED_AREA:
		iwl_mei_handle_check_shared_area(cldev);
		break;
	default:
		dev_err(&cldev->dev, "got a RX notification: %d\n",
			le32_to_cpu(hdr->type));
		break;
	}
}

/*
 * iwl_mei_send_start - kick off the SAP protocol.
 *
 * Sends SAP_ME_MSG_START advertising the supported SAP version and the
 * initial data/notif sequence numbers. The SAP_ME_MSG_START_OK answer is
 * handled asynchronously in iwl_mei_rx().
 */
static int iwl_mei_send_start(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	struct iwl_sap_me_msg_start msg = {
		.hdr.type = cpu_to_le32(SAP_ME_MSG_START),
		.hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)),
		.hdr.len = cpu_to_le32(sizeof(msg)),
		.supported_versions[0] = SAP_VERSION,
		.init_data_seq_num = cpu_to_le16(0x100),
		.init_notif_seq_num = cpu_to_le16(0x800),
	};
	int ret;

	trace_iwlmei_me_msg(&msg.hdr, true);
	ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg));
	/* a short write is a failure too */
	if (ret != sizeof(msg)) {
		dev_err(&cldev->dev,
			"failed to send the SAP_ME_MSG_START message %d\n",
			ret);
		return ret;
	}

	return 0;
}

/* Enable the mei client device and register the Rx callback */
static int iwl_mei_enable(struct mei_cl_device *cldev)
{
	int ret;

	ret = mei_cldev_enable(cldev);
	if (ret < 0) {
		dev_err(&cldev->dev, "failed to enable the device: %d\n", ret);
		return ret;
	}

	ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx);
	if (ret) {
		dev_err(&cldev->dev,
			"failed to register to the rx cb: %d\n", ret);
		mei_cldev_disable(cldev);
		return ret;
	}

	return 0;
}

/*
 * iwl_mei_get_nvm - request the NVM data from CSME.
 *
 * Sends SAP_MSG_NOTIF_GET_NVM and waits up to 2 seconds for the answer
 * (mei->nvm is set by the Rx path, which wakes get_nvm_wq).
 * Returns a kmemdup'ed copy owned by the caller, or NULL on any failure.
 */
struct iwl_mei_nvm *iwl_mei_get_nvm(void)
{
	struct iwl_mei_nvm *nvm = NULL;
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev,
				   SAP_MSG_NOTIF_GET_NVM);
	if (ret)
		goto out;

	/* Drop the mutex while sleeping; the state is re-validated below */
	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ);
	if (!ret)
		return NULL;

	mutex_lock(&iwl_mei_mutex);

	/* The SAP connection may have gone away while we were waiting */
	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (mei->nvm)
		nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL);

out:
	mutex_unlock(&iwl_mei_mutex);
	return nvm;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_nvm);

/*
 * iwl_mei_get_ownership - ask CSME to release the NIC to the host.
 *
 * Returns 0 when there is nothing to ask (no bind / AMT disabled /
 * already owner), a negative error on failure (-ENODEV, -ETIMEDOUT),
 * or 1 when CSME answered but did not grant ownership (!got_ownership).
 */
int iwl_mei_get_ownership(void)
{
	struct iwl_mei *mei;
	int ret;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	if (!mei->amt_enabled) {
		ret = 0;
		goto out;
	}

	if (mei->got_ownership) {
		ret = 0;
		goto out;
	}

	ret = iwl_mei_send_sap_msg(mei->cldev,
				   SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP);
	if (ret)
		goto out;

	/* Drop the mutex while waiting for CSME's answer */
	mutex_unlock(&iwl_mei_mutex);

	ret = wait_event_timeout(mei->get_ownership_wq,
				 mei->got_ownership, HZ / 2);
	if (!ret)
		return -ETIMEDOUT;

	mutex_lock(&iwl_mei_mutex);

	/* In case we didn't have a bind */
	if (!iwl_mei_is_connected()) {
		ret = 0;
		goto out;
	}

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei) {
		ret = -ENODEV;
		goto out;
	}

	ret = !mei->got_ownership;

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_get_ownership);

/*
 * iwl_mei_host_associated - tell CSME that the host interface associated,
 * and cache the connection info for replay after a reconnection.
 */
void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info,
			     const struct iwl_mei_colloc_info
*colloc_info) 1383{ 1384 struct iwl_sap_notif_host_link_up msg = { 1385 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), 1386 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1387 .conn_info = { 1388 .ssid_len = cpu_to_le32(conn_info->ssid_len), 1389 .channel = conn_info->channel, 1390 .band = conn_info->band, 1391 .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher), 1392 .auth_mode = cpu_to_le32(conn_info->auth_mode), 1393 }, 1394 }; 1395 struct iwl_mei *mei; 1396 1397 if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid)) 1398 return; 1399 1400 memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len); 1401 memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN); 1402 1403 if (colloc_info) { 1404 msg.colloc_channel = colloc_info->channel; 1405 msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1; 1406 memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN); 1407 } 1408 1409 mutex_lock(&iwl_mei_mutex); 1410 1411 if (!iwl_mei_is_connected()) 1412 goto out; 1413 1414 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1415 1416 if (!mei) 1417 goto out; 1418 1419 if (!mei->amt_enabled) 1420 goto out; 1421 1422 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1423 1424out: 1425 kfree(iwl_mei_cache.conn_info); 1426 iwl_mei_cache.conn_info = 1427 kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL); 1428 mutex_unlock(&iwl_mei_mutex); 1429} 1430EXPORT_SYMBOL_GPL(iwl_mei_host_associated); 1431 1432void iwl_mei_host_disassociated(void) 1433{ 1434 struct iwl_mei *mei; 1435 struct iwl_sap_notif_host_link_down msg = { 1436 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN), 1437 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1438 .type = HOST_LINK_DOWN_TYPE_LONG, 1439 }; 1440 1441 mutex_lock(&iwl_mei_mutex); 1442 1443 if (!iwl_mei_is_connected()) 1444 goto out; 1445 1446 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1447 1448 if (!mei) 1449 goto out; 1450 1451 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1452 1453out: 
1454 kfree(iwl_mei_cache.conn_info); 1455 iwl_mei_cache.conn_info = NULL; 1456 mutex_unlock(&iwl_mei_mutex); 1457} 1458EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated); 1459 1460void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) 1461{ 1462 struct iwl_mei *mei; 1463 u32 rfkill_state = 0; 1464 struct iwl_sap_msg_dw msg = { 1465 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), 1466 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1467 }; 1468 1469 if (!sw_rfkill) 1470 rfkill_state |= SAP_SW_RFKILL_DEASSERTED; 1471 1472 if (!hw_rfkill) 1473 rfkill_state |= SAP_HW_RFKILL_DEASSERTED; 1474 1475 mutex_lock(&iwl_mei_mutex); 1476 1477 if (!iwl_mei_is_connected()) 1478 goto out; 1479 1480 msg.val = cpu_to_le32(rfkill_state); 1481 1482 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1483 1484 if (!mei) 1485 goto out; 1486 1487 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1488 1489out: 1490 iwl_mei_cache.rf_kill = rfkill_state; 1491 mutex_unlock(&iwl_mei_mutex); 1492} 1493EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state); 1494 1495void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address) 1496{ 1497 struct iwl_mei *mei; 1498 struct iwl_sap_notif_host_nic_info msg = { 1499 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), 1500 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1501 }; 1502 1503 mutex_lock(&iwl_mei_mutex); 1504 1505 if (!iwl_mei_is_connected()) 1506 goto out; 1507 1508 ether_addr_copy(msg.mac_address, mac_address); 1509 ether_addr_copy(msg.nvm_address, nvm_address); 1510 1511 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1512 1513 if (!mei) 1514 goto out; 1515 1516 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1517 1518out: 1519 ether_addr_copy(iwl_mei_cache.mac_address, mac_address); 1520 ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address); 1521 mutex_unlock(&iwl_mei_mutex); 1522} 1523EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info); 1524 1525void iwl_mei_set_country_code(u16 mcc) 1526{ 1527 struct iwl_mei *mei; 1528 
struct iwl_sap_notif_country_code msg = { 1529 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), 1530 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1531 .mcc = cpu_to_le16(mcc), 1532 }; 1533 1534 mutex_lock(&iwl_mei_mutex); 1535 1536 if (!iwl_mei_is_connected()) 1537 goto out; 1538 1539 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1540 1541 if (!mei) 1542 goto out; 1543 1544 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1545 1546out: 1547 iwl_mei_cache.mcc = mcc; 1548 mutex_unlock(&iwl_mei_mutex); 1549} 1550EXPORT_SYMBOL_GPL(iwl_mei_set_country_code); 1551 1552void iwl_mei_set_power_limit(const __le16 *power_limit) 1553{ 1554 struct iwl_mei *mei; 1555 struct iwl_sap_notif_sar_limits msg = { 1556 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), 1557 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1558 }; 1559 1560 mutex_lock(&iwl_mei_mutex); 1561 1562 if (!iwl_mei_is_connected()) 1563 goto out; 1564 1565 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1566 1567 if (!mei) 1568 goto out; 1569 1570 memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table)); 1571 1572 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1573 1574out: 1575 kfree(iwl_mei_cache.power_limit); 1576 iwl_mei_cache.power_limit = kmemdup(power_limit, 1577 sizeof(msg.sar_chain_info_table), GFP_KERNEL); 1578 mutex_unlock(&iwl_mei_mutex); 1579} 1580EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit); 1581 1582void iwl_mei_set_netdev(struct net_device *netdev) 1583{ 1584 struct iwl_mei *mei; 1585 1586 mutex_lock(&iwl_mei_mutex); 1587 1588 if (!iwl_mei_is_connected()) { 1589 rcu_assign_pointer(iwl_mei_cache.netdev, netdev); 1590 goto out; 1591 } 1592 1593 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1594 1595 if (!mei) 1596 goto out; 1597 1598 if (!netdev) { 1599 struct net_device *dev = 1600 rcu_dereference_protected(iwl_mei_cache.netdev, 1601 lockdep_is_held(&iwl_mei_mutex)); 1602 1603 if (!dev) 1604 goto out; 1605 1606 
		netdev_rx_handler_unregister(dev);
	}

	rcu_assign_pointer(iwl_mei_cache.netdev, netdev);

	/* Only hook the data path when CSME actually uses it (AMT enabled) */
	if (netdev && mei->amt_enabled)
		netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei);

out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_set_netdev);

/*
 * iwl_mei_device_down - the wifi driver reports the NIC is going down.
 * If CSME asked for ownership, confirm it now that the host released the
 * device.
 */
void iwl_mei_device_down(void)
{
	struct iwl_mei *mei;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_is_connected())
		goto out;

	mei = mei_cldev_get_drvdata(iwl_mei_global_cldev);

	if (!mei)
		goto out;

	if (!mei->csme_taking_ownership)
		goto out;

	iwl_mei_send_sap_msg(mei->cldev,
			     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
	mei->csme_taking_ownership = false;
out:
	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_device_down);

/*
 * iwl_mei_register - register the wifi driver (its ops and opaque priv)
 * with iwlmei. Only one registration is allowed at a time.
 * If a SAP connection already exists, tell CSME the wifi driver is up and
 * report the current link protection state through ops->rfkill.
 */
int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
{
	int ret;

	/*
	 * We must have a non-NULL priv pointer to not crash when there are
	 * multiple WiFi devices.
	 */
	if (!priv)
		return -EINVAL;

	mutex_lock(&iwl_mei_mutex);

	/* do not allow registration if someone else already registered */
	if (iwl_mei_cache.priv || iwl_mei_cache.ops) {
		ret = -EBUSY;
		goto out;
	}

	iwl_mei_cache.priv = priv;
	iwl_mei_cache.ops = ops;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		/* we have already a SAP connection */
		if (iwl_mei_is_connected()) {
			iwl_mei_send_sap_msg(mei->cldev,
					     SAP_MSG_NOTIF_WIFIDR_UP);
			ops->rfkill(priv, mei->link_prot_state);
		}
	}
	ret = 0;

out:
	mutex_unlock(&iwl_mei_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iwl_mei_register);

/*
 * iwl_mei_start_unregister - first half of unregistration: drop the cached
 * state and ops, but keep priv set so no new registration can race in.
 */
void iwl_mei_start_unregister(void)
{
	mutex_lock(&iwl_mei_mutex);

	/* At this point, the wifi driver should have removed the netdev */
	if (rcu_access_pointer(iwl_mei_cache.netdev))
		pr_err("Still had a netdev pointer set upon unregister\n");

	kfree(iwl_mei_cache.conn_info);
	iwl_mei_cache.conn_info = NULL;
	kfree(iwl_mei_cache.power_limit);
	iwl_mei_cache.power_limit = NULL;
	iwl_mei_cache.ops = NULL;
	/* leave iwl_mei_cache.priv non-NULL to prevent any new registration */

	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_start_unregister);

/*
 * iwl_mei_unregister_complete - second half of unregistration: clear priv
 * and tell CSME the wifi driver is gone.
 */
void iwl_mei_unregister_complete(void)
{
	mutex_lock(&iwl_mei_mutex);

	iwl_mei_cache.priv = NULL;

	if (iwl_mei_global_cldev) {
		struct iwl_mei *mei =
			mei_cldev_get_drvdata(iwl_mei_global_cldev);

		iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
		mei->got_ownership = false;
	}

	mutex_unlock(&iwl_mei_mutex);
}
EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete);

#if IS_ENABLED(CONFIG_DEBUG_FS)

/* debugfs write hook: (re-)send the SAP_ME_MSG_START message */
static ssize_t
iwl_mei_dbgfs_send_start_message_write(struct file *file,
				       const
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	int ret;

	mutex_lock(&iwl_mei_mutex);

	if (!iwl_mei_global_cldev) {
		ret = -ENODEV;
		goto out;
	}

	ret = iwl_mei_send_start(iwl_mei_global_cldev);

out:
	mutex_unlock(&iwl_mei_mutex);
	/* report the whole write as consumed on success */
	return ret ?: count;
}

static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = {
	.write = iwl_mei_dbgfs_send_start_message_write,
	.open = simple_open,
	.llseek = default_llseek,
};

/* debugfs write hook: ask CSME for NIC ownership (result is ignored) */
static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file,
						 const char __user *user_buf,
						 size_t count, loff_t *ppos)
{
	iwl_mei_get_ownership();

	return count;
}

static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = {
	.write = iwl_mei_dbgfs_req_ownership_write,
	.open = simple_open,
	.llseek = default_llseek,
};

/* Create the driver's debugfs directory and its entries */
static void iwl_mei_dbgfs_register(struct iwl_mei *mei)
{
	mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/*
	 * NOTE(review): debugfs_create_dir() returns an ERR_PTR on failure,
	 * not NULL, so this check likely never triggers — confirm.
	 */
	if (!mei->dbgfs_dir)
		return;

	debugfs_create_ulong("status", S_IRUSR,
			     mei->dbgfs_dir, &iwl_mei_status);
	debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir,
			    mei, &iwl_mei_dbgfs_send_start_message_ops);
	debugfs_create_file("req_ownership", S_IWUSR, mei->dbgfs_dir,
			    mei, &iwl_mei_dbgfs_req_ownership_ops);
}

static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei)
{
	debugfs_remove_recursive(mei->dbgfs_dir);
	mei->dbgfs_dir = NULL;
}

#else

/* No-op stubs when debugfs is compiled out */
static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {}
static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}

#endif /* CONFIG_DEBUG_FS */

#define ALLOC_SHARED_MEM_RETRY_MAX_NUM	3

/*
 * iwl_mei_probe - the probe function called by the mei bus enumeration
 *
 * This allocates the data needed by iwlmei and sets a
 pointer to this data
 * into the mei_cl_device's drvdata.
 * It starts the SAP protocol by sending the SAP_ME_MSG_START without
 * waiting for the answer. The answer will be caught later by the Rx callback.
 */
static int iwl_mei_probe(struct mei_cl_device *cldev,
			 const struct mei_cl_device_id *id)
{
	int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
	struct iwl_mei *mei;
	int ret;

	mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL);
	if (!mei)
		return -ENOMEM;

	init_waitqueue_head(&mei->get_nvm_wq);
	INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk);
	INIT_DELAYED_WORK(&mei->csa_throttle_end_wk,
			  iwl_mei_csa_throttle_end_wk);
	init_waitqueue_head(&mei->get_ownership_wq);
	spin_lock_init(&mei->data_q_lock);

	mei_cldev_set_drvdata(cldev, mei);
	mei->cldev = cldev;

	do {
		ret = iwl_mei_alloc_shared_mem(cldev);
		if (!ret)
			break;
		/*
		 * The CSME firmware needs to boot the internal WLAN client.
		 * This can take time in certain configurations (usually
		 * upon resume and when the whole CSME firmware is shut down
		 * during suspend).
		 *
		 * Wait a bit before retrying and hope we'll succeed next time.
		 */

		dev_dbg(&cldev->dev,
			"Couldn't allocate the shared memory: %d, attempt %d / %d\n",
			ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
		msleep(100);
		alloc_retry--;
	} while (alloc_retry);

	if (ret) {
		dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
			ret);
		goto free;
	}

	iwl_mei_init_shared_mem(mei);

	ret = iwl_mei_enable(cldev);
	if (ret)
		goto free_shared_mem;

	iwl_mei_dbgfs_register(mei);

	/*
	 * We now have a Rx function in place, start the SAP procotol
	 * we expect to get the SAP_ME_MSG_START_OK response later on.
	 */
	mutex_lock(&iwl_mei_mutex);
	ret = iwl_mei_send_start(cldev);
	mutex_unlock(&iwl_mei_mutex);
	if (ret)
		goto debugfs_unregister;

	/* must be last */
	iwl_mei_global_cldev = cldev;

	return 0;

debugfs_unregister:
	iwl_mei_dbgfs_unregister(mei);
	mei_cldev_disable(cldev);
free_shared_mem:
	iwl_mei_free_shared_mem(cldev);
free:
	mei_cldev_set_drvdata(cldev, NULL);
	devm_kfree(&cldev->dev, mei);

	return ret;
}

/* Max number of 5ms polls for CSME to drain the host->ME data queue */
#define SEND_SAP_MAX_WAIT_ITERATION 10

/*
 * iwl_mei_remove - tear down the SAP connection on mei bus removal
 * (suspend / shutdown). Notifies the wifi driver, unhooks the netdev's Rx
 * handler, tells CSME the host is going down and waits for it to stop
 * touching the shared memory before freeing it.
 */
static void iwl_mei_remove(struct mei_cl_device *cldev)
{
	struct iwl_mei *mei = mei_cldev_get_drvdata(cldev);
	int i;

	/*
	 * We are being removed while the bus is active, it means we are
	 * going to suspend/ shutdown, so the NIC will disappear.
	 */
	if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops)
		iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv);

	if (rcu_access_pointer(iwl_mei_cache.netdev)) {
		struct net_device *dev;

		/*
		 * First take rtnl and only then the mutex to avoid an ABBA
		 * with iwl_mei_set_netdev()
		 */
		rtnl_lock();
		mutex_lock(&iwl_mei_mutex);

		/*
		 * If we are suspending and the wifi driver hasn't removed it's netdev
		 * yet, do it now. In any case, don't change the cache.netdev pointer.
		 */
		dev = rcu_dereference_protected(iwl_mei_cache.netdev,
						lockdep_is_held(&iwl_mei_mutex));

		netdev_rx_handler_unregister(dev);
		mutex_unlock(&iwl_mei_mutex);
		rtnl_unlock();
	}

	mutex_lock(&iwl_mei_mutex);

	/*
	 * Tell CSME that we are going down so that it won't access the
	 * memory anymore, make sure this message goes through immediately.
	 */
	mei->csa_throttled = false;
	iwl_mei_send_sap_msg(mei->cldev,
			     SAP_MSG_NOTIF_HOST_GOES_DOWN);

	/* Poll until CSME consumed everything we queued to it */
	for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) {
		if (!iwl_mei_host_to_me_data_pending(mei))
			break;

		msleep(5);
	}

	/*
	 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message,
	 * it means that it will probably keep reading memory that we are going
	 * to unmap and free, expect IOMMU error messages.
	 */
	if (i == SEND_SAP_MAX_WAIT_ITERATION)
		dev_err(&mei->cldev->dev,
			"Couldn't get ACK from CSME on HOST_GOES_DOWN message\n");

	mutex_unlock(&iwl_mei_mutex);

	/*
	 * This looks strange, but this lock is taken here to make sure that
	 * iwl_mei_add_data_to_ring called from the Tx path sees that we
	 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit.
	 * Rx isn't a problem because the rx_handler can't be called after
	 * having been unregistered.
	 */
	spin_lock_bh(&mei->data_q_lock);
	clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status);
	spin_unlock_bh(&mei->data_q_lock);

	if (iwl_mei_cache.ops)
		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);

	/*
	 * mei_cldev_disable will return only after all the MEI Rx is done.
	 * It must be called when iwl_mei_mutex is *not* held, since it waits
	 * for our Rx handler to complete.
	 * After it returns, no new Rx will start.
	 */
	mei_cldev_disable(cldev);

	/*
	 * Since the netdev was already removed and the netdev's removal
	 * includes a call to synchronize_net() so that we know there won't be
	 * any new Rx that will trigger the following workers.
	 */
	cancel_work_sync(&mei->send_csa_msg_wk);
	cancel_delayed_work_sync(&mei->csa_throttle_end_wk);

	/*
	 * If someone waits for the ownership, let him know that we are going
	 * down and that we are not connected anymore. He'll be able to take
	 * the device.
	 */
	wake_up_all(&mei->get_ownership_wq);

	mutex_lock(&iwl_mei_mutex);

	iwl_mei_global_cldev = NULL;

	wake_up_all(&mei->get_nvm_wq);

	iwl_mei_free_shared_mem(cldev);

	iwl_mei_dbgfs_unregister(mei);

	mei_cldev_set_drvdata(cldev, NULL);

	kfree(mei->nvm);

	kfree(rcu_access_pointer(mei->filters));

	devm_kfree(&cldev->dev, mei);

	mutex_unlock(&iwl_mei_mutex);
}

static const struct mei_cl_device_id iwl_mei_tbl[] = {
	{
		.name = KBUILD_MODNAME,
		.uuid = MEI_WLAN_UUID,
		.version = MEI_CL_VERSION_ANY,
	},

	/* required last entry */
	{ }
};

/*
 * Do not export the device table because this module is loaded by
 * iwlwifi's dependency.
 */

static struct mei_cl_driver iwl_mei_cl_driver = {
	.id_table = iwl_mei_tbl,
	.name = KBUILD_MODNAME,
	.probe = iwl_mei_probe,
	.remove = iwl_mei_remove,
};

module_mei_cl_driver(iwl_mei_cl_driver);