testmode.c
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include "mt76.h"

const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
        [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
        [MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
        [MT76_TM_ATTR_TX_LENGTH] = { .type = NLA_U32 },
        [MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_SPE_IDX] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED },
        [MT76_TM_ATTR_TX_DUTY_CYCLE] = { .type = NLA_U8 },
        [MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
        [MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
        [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
        [MT76_TM_ATTR_DRV_DATA] = { .type = NLA_NESTED },
};
EXPORT_SYMBOL_GPL(mt76_tm_policy);

void mt76_testmode_tx_pending(struct mt76_phy *phy)
{
        struct mt76_testmode_data *td = &phy->test;
        struct mt76_dev *dev = phy->dev;
        struct mt76_wcid *wcid = &dev->global_wcid;
        struct sk_buff *skb = td->tx_skb;
        struct mt76_queue *q;
        u16 tx_queued_limit;
        int qid;

        if (!skb || !td->tx_pending)
                return;

        qid = skb_get_queue_mapping(skb);
        q = phy->q_tx[qid];

        tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;

        spin_lock_bh(&q->lock);

        while (td->tx_pending > 0 &&
               td->tx_queued - td->tx_done < tx_queued_limit &&
               q->queued < q->ndesc / 2) {
                int ret;

                ret = dev->queue_ops->tx_queue_skb(dev, q, skb_get(skb), wcid,
                                                   NULL);
                if (ret < 0)
                        break;

                td->tx_pending--;
                td->tx_queued++;
        }

        dev->queue_ops->kick(dev, q);

        spin_unlock_bh(&q->lock);
}

static u32
mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
{
        switch (tx_rate_mode) {
        case MT76_TM_TX_MODE_HT:
                return IEEE80211_MAX_MPDU_LEN_HT_7935;
        case MT76_TM_TX_MODE_VHT:
        case MT76_TM_TX_MODE_HE_SU:
        case MT76_TM_TX_MODE_HE_EXT_SU:
        case MT76_TM_TX_MODE_HE_TB:
        case MT76_TM_TX_MODE_HE_MU:
                if (phy->sband_5g.sband.vht_cap.cap &
                    IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991)
                        return IEEE80211_MAX_MPDU_LEN_VHT_7991;
                return IEEE80211_MAX_MPDU_LEN_VHT_11454;
        case MT76_TM_TX_MODE_CCK:
        case MT76_TM_TX_MODE_OFDM:
        default:
                return IEEE80211_MAX_FRAME_LEN;
        }
}

static void
mt76_testmode_free_skb(struct mt76_phy *phy)
{
        struct mt76_testmode_data *td = &phy->test;

        dev_kfree_skb(td->tx_skb);
        td->tx_skb = NULL;
}
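
/*
 * Build the template frame used in TX_FRAMES state. MPDUs longer than
 * MT_TXP_MAX_LEN are assembled as a head skb plus a frag_list chain so
 * that no single buffer exceeds the 4095-byte limit below.
 */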
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
{
#define MT_TXP_MAX_LEN 4095
        u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
                 IEEE80211_FCTL_FROMDS;
        struct mt76_testmode_data *td = &phy->test;
        bool ext_phy = phy != &phy->dev->phy;
        struct sk_buff **frag_tail, *head;
        struct ieee80211_tx_info *info;
        struct ieee80211_hdr *hdr;
        u32 max_len, head_len;
        int nfrags, i;

        max_len = mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode);
        if (len > max_len)
                len = max_len;
        else if (len < sizeof(struct ieee80211_hdr))
                len = sizeof(struct ieee80211_hdr);

        nfrags = len / MT_TXP_MAX_LEN;
        head_len = nfrags ? MT_TXP_MAX_LEN : len;

        if (len > IEEE80211_MAX_FRAME_LEN)
                fc |= IEEE80211_STYPE_QOS_DATA;

        head = alloc_skb(head_len, GFP_KERNEL);
        if (!head)
                return -ENOMEM;

        hdr = __skb_put_zero(head, head_len);
        hdr->frame_control = cpu_to_le16(fc);
        memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
        memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
        memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
        skb_set_queue_mapping(head, IEEE80211_AC_BE);

        info = IEEE80211_SKB_CB(head);
        info->flags = IEEE80211_TX_CTL_INJECTED |
                      IEEE80211_TX_CTL_NO_ACK |
                      IEEE80211_TX_CTL_NO_PS_BUFFER;

        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        frag_tail = &skb_shinfo(head)->frag_list;

        for (i = 0; i < nfrags; i++) {
                struct sk_buff *frag;
                u16 frag_len;

                if (i == nfrags - 1)
                        frag_len = len % MT_TXP_MAX_LEN;
                else
                        frag_len = MT_TXP_MAX_LEN;

                frag = alloc_skb(frag_len, GFP_KERNEL);
                if (!frag) {
                        mt76_testmode_free_skb(phy);
                        dev_kfree_skb(head);
                        return -ENOMEM;
                }

                __skb_put_zero(frag, frag_len);
                head->len += frag->len;
                head->data_len += frag->len;

                *frag_tail = frag;
                frag_tail = &(*frag_tail)->next;
        }

        mt76_testmode_free_skb(phy);
        td->tx_skb = head;

        return 0;
}
EXPORT_SYMBOL(mt76_testmode_alloc_skb);

static int
mt76_testmode_tx_init(struct mt76_phy *phy)
{
        struct mt76_testmode_data *td = &phy->test;
        struct ieee80211_tx_info *info;
        struct ieee80211_tx_rate *rate;
        u8 max_nss = hweight8(phy->antenna_mask);
        int ret;

        ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
        if (ret)
                return ret;

        if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT)
                goto out;

        if (td->tx_antenna_mask)
                max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));

        info = IEEE80211_SKB_CB(td->tx_skb);
        rate = &info->control.rates[0];
        rate->count = 1;
        rate->idx = td->tx_rate_idx;

        switch (td->tx_rate_mode) {
        case MT76_TM_TX_MODE_CCK:
                if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
                        return -EINVAL;

                if (rate->idx > 4)
                        return -EINVAL;
                break;
        case MT76_TM_TX_MODE_OFDM:
                if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
                        break;

                if (rate->idx > 8)
                        return -EINVAL;

                rate->idx += 4;
                break;
        case MT76_TM_TX_MODE_HT:
                if (rate->idx > 8 * max_nss &&
                    !(rate->idx == 32 &&
                      phy->chandef.width >= NL80211_CHAN_WIDTH_40))
                        return -EINVAL;

                rate->flags |= IEEE80211_TX_RC_MCS;
                break;
        case MT76_TM_TX_MODE_VHT:
                if (rate->idx > 9)
                        return -EINVAL;

                if (td->tx_rate_nss > max_nss)
                        return -EINVAL;

                ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss);
                rate->flags |= IEEE80211_TX_RC_VHT_MCS;
                break;
        default:
                break;
        }

        if (td->tx_rate_sgi)
                rate->flags |= IEEE80211_TX_RC_SHORT_GI;

        if (td->tx_rate_ldpc)
                info->flags |= IEEE80211_TX_CTL_LDPC;

        if (td->tx_rate_stbc)
                info->flags |= IEEE80211_TX_CTL_STBC;

        if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) {
                switch (phy->chandef.width) {
                case NL80211_CHAN_WIDTH_40:
                        rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
                        break;
                case NL80211_CHAN_WIDTH_80:
                        rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
                        break;
                case NL80211_CHAN_WIDTH_80P80:
                case NL80211_CHAN_WIDTH_160:
                        rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
                        break;
                default:
                        break;
                }
        }
out:
        return 0;
}
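
/*
 * Frame transmission is driven by the shared tx worker:
 * mt76_testmode_tx_start() only arms td->tx_pending and schedules the
 * worker, which calls mt76_testmode_tx_pending() above to keep the
 * hardware queue filled up to half of its descriptors.
 */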
static void
mt76_testmode_tx_start(struct mt76_phy *phy)
{
        struct mt76_testmode_data *td = &phy->test;
        struct mt76_dev *dev = phy->dev;

        td->tx_queued = 0;
        td->tx_done = 0;
        td->tx_pending = td->tx_count;
        mt76_worker_schedule(&dev->tx_worker);
}

static void
mt76_testmode_tx_stop(struct mt76_phy *phy)
{
        struct mt76_testmode_data *td = &phy->test;
        struct mt76_dev *dev = phy->dev;

        mt76_worker_disable(&dev->tx_worker);

        td->tx_pending = 0;

        mt76_worker_enable(&dev->tx_worker);

        wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
                           MT76_TM_TIMEOUT * HZ);

        mt76_testmode_free_skb(phy);
}

static inline void
mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx)
{
        td->param_set[idx / 32] |= BIT(idx % 32);
}

static inline bool
mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx)
{
        return td->param_set[idx / 32] & BIT(idx % 32);
}

static void
mt76_testmode_init_defaults(struct mt76_phy *phy)
{
        struct mt76_testmode_data *td = &phy->test;

        if (td->tx_mpdu_len > 0)
                return;

        td->tx_mpdu_len = 1024;
        td->tx_count = 1;
        td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
        td->tx_rate_nss = 1;

        memcpy(td->addr[0], phy->macaddr, ETH_ALEN);
        memcpy(td->addr[1], phy->macaddr, ETH_ALEN);
        memcpy(td->addr[2], phy->macaddr, ETH_ALEN);
}

static int
__mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
{
        enum mt76_testmode_state prev_state = phy->test.state;
        struct mt76_dev *dev = phy->dev;
        int err;

        if (prev_state == MT76_TM_STATE_TX_FRAMES)
                mt76_testmode_tx_stop(phy);

        if (state == MT76_TM_STATE_TX_FRAMES) {
                err = mt76_testmode_tx_init(phy);
                if (err)
                        return err;
        }

        err = dev->test_ops->set_state(phy, state);
        if (err) {
                if (state == MT76_TM_STATE_TX_FRAMES)
                        mt76_testmode_tx_stop(phy);

                return err;
        }

        if (state == MT76_TM_STATE_TX_FRAMES)
                mt76_testmode_tx_start(phy);
        else if (state == MT76_TM_STATE_RX_FRAMES)
                memset(&phy->test.rx_stats, 0, sizeof(phy->test.rx_stats));

        phy->test.state = state;

        return 0;
}

int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state)
{
        struct mt76_testmode_data *td = &phy->test;
        struct ieee80211_hw *hw = phy->hw;

        if (state == td->state && state == MT76_TM_STATE_OFF)
                return 0;

        if (state > MT76_TM_STATE_OFF &&
            (!test_bit(MT76_STATE_RUNNING, &phy->state) ||
             !(hw->conf.flags & IEEE80211_CONF_MONITOR)))
                return -ENOTCONN;

        if (state != MT76_TM_STATE_IDLE &&
            td->state != MT76_TM_STATE_IDLE) {
                int ret;

                ret = __mt76_testmode_set_state(phy, MT76_TM_STATE_IDLE);
                if (ret)
                        return ret;
        }

        return __mt76_testmode_set_state(phy, state);
}
EXPORT_SYMBOL(mt76_testmode_set_state);

static int
mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
{
        u8 val;

        if (!attr)
                return 0;

        val = nla_get_u8(attr);
        if (val < min || val > max)
                return -EINVAL;

        *dest = val;
        return 0;
}
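
/*
 * Handle an NL80211_CMD_TESTMODE set command: parse the MT76_TM_ATTR_*
 * attributes, validate and apply them to the per-phy test
 * configuration, and optionally switch the testmode state.
 */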
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                      void *data, int len)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct mt76_testmode_data *td = &phy->test;
        struct nlattr *tb[NUM_MT76_TM_ATTRS];
        u32 state;
        int err;
        int i;

        if (!dev->test_ops)
                return -EOPNOTSUPP;

        err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
                                   mt76_tm_policy, NULL);
        if (err)
                return err;

        err = -EINVAL;

        mutex_lock(&dev->mutex);

        if (tb[MT76_TM_ATTR_RESET]) {
                mt76_testmode_set_state(phy, MT76_TM_STATE_OFF);
                memset(td, 0, sizeof(*td));
        }

        mt76_testmode_init_defaults(phy);

        if (tb[MT76_TM_ATTR_TX_COUNT])
                td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);

        if (tb[MT76_TM_ATTR_TX_RATE_IDX])
                td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);

        if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode,
                           0, MT76_TM_TX_MODE_MAX) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss,
                           1, hweight8(phy->antenna_mask)) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 2) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA],
                           &td->tx_antenna_mask, 0, 0xff) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_SPE_IDX], &td->tx_spe_idx, 0, 27) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_DUTY_CYCLE],
                           &td->tx_duty_cycle, 0, 99) ||
            mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
                           &td->tx_power_control, 0, 1))
                goto out;

        if (tb[MT76_TM_ATTR_TX_LENGTH]) {
                u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);

                if (val > mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode) ||
                    val < sizeof(struct ieee80211_hdr))
                        goto out;

                td->tx_mpdu_len = val;
        }

        if (tb[MT76_TM_ATTR_TX_IPG])
                td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]);

        if (tb[MT76_TM_ATTR_TX_TIME])
                td->tx_time = nla_get_u32(tb[MT76_TM_ATTR_TX_TIME]);

        if (tb[MT76_TM_ATTR_FREQ_OFFSET])
                td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]);

        if (tb[MT76_TM_ATTR_STATE]) {
                state = nla_get_u8(tb[MT76_TM_ATTR_STATE]);
                if (state > MT76_TM_STATE_MAX)
                        goto out;
        } else {
                state = td->state;
        }

        if (tb[MT76_TM_ATTR_TX_POWER]) {
                struct nlattr *cur;
                int idx = 0;
                int rem;

                nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
                        if (nla_len(cur) != 1 ||
                            idx >= ARRAY_SIZE(td->tx_power))
                                goto out;

                        td->tx_power[idx++] = nla_get_u8(cur);
                }
        }

        if (tb[MT76_TM_ATTR_MAC_ADDRS]) {
                struct nlattr *cur;
                int idx = 0;
                int rem;

                nla_for_each_nested(cur, tb[MT76_TM_ATTR_MAC_ADDRS], rem) {
                        if (nla_len(cur) != ETH_ALEN || idx >= 3)
                                goto out;

                        memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
                        idx++;
                }
        }

        if (dev->test_ops->set_params) {
                err = dev->test_ops->set_params(phy, tb, state);
                if (err)
                        goto out;
        }

        for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++)
                if (tb[i])
                        mt76_testmode_param_set(td, i);

        err = 0;
        if (tb[MT76_TM_ATTR_STATE])
                err = mt76_testmode_set_state(phy, state);

out:
        mutex_unlock(&dev->mutex);

        return err;
}
EXPORT_SYMBOL(mt76_testmode_cmd);
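
/*
 * Dump side of the interface: report either the aggregated statistics
 * (MT76_TM_STATS_ATTR_*) when MT76_TM_ATTR_STATS is requested, or the
 * currently configured MT76_TM_ATTR_* parameters.
 */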
static int
mt76_testmode_dump_stats(struct mt76_phy *phy, struct sk_buff *msg)
{
        struct mt76_testmode_data *td = &phy->test;
        struct mt76_dev *dev = phy->dev;
        u64 rx_packets = 0;
        u64 rx_fcs_error = 0;
        int i;

        if (dev->test_ops->dump_stats) {
                int ret;

                ret = dev->test_ops->dump_stats(phy, msg);
                if (ret)
                        return ret;
        }

        for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
                rx_packets += td->rx_stats.packets[i];
                rx_fcs_error += td->rx_stats.fcs_error[i];
        }

        if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) ||
            nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) ||
            nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) ||
            nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets,
                              MT76_TM_STATS_ATTR_PAD) ||
            nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error,
                              MT76_TM_STATS_ATTR_PAD))
                return -EMSGSIZE;

        return 0;
}

int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
                       struct netlink_callback *cb, void *data, int len)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct mt76_testmode_data *td = &phy->test;
        struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
        int err = 0;
        void *a;
        int i;

        if (!dev->test_ops)
                return -EOPNOTSUPP;

        if (cb->args[2]++ > 0)
                return -ENOENT;

        if (data) {
                err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
                                           mt76_tm_policy, NULL);
                if (err)
                        return err;
        }

        mutex_lock(&dev->mutex);

        if (tb[MT76_TM_ATTR_STATS]) {
                err = -EINVAL;

                a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
                if (a) {
                        err = mt76_testmode_dump_stats(phy, msg);
                        nla_nest_end(msg, a);
                }

                goto out;
        }

        mt76_testmode_init_defaults(phy);

        err = -EMSGSIZE;
        if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state))
                goto out;

        if (dev->test_mtd.name &&
            (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, dev->test_mtd.name) ||
             nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, dev->test_mtd.offset)))
                goto out;

        if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
            nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
            nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
            nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
            nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
            nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
            nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
            nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) &&
             nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) &&
             nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_SPE_IDX) &&
             nla_put_u8(msg, MT76_TM_ATTR_TX_SPE_IDX, td->tx_spe_idx)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_DUTY_CYCLE) &&
             nla_put_u8(msg, MT76_TM_ATTR_TX_DUTY_CYCLE, td->tx_duty_cycle)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_IPG) &&
             nla_put_u32(msg, MT76_TM_ATTR_TX_IPG, td->tx_ipg)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_TIME) &&
             nla_put_u32(msg, MT76_TM_ATTR_TX_TIME, td->tx_time)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
             nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
            (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
             nla_put_u32(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
                goto out;
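
        /* nested attributes: configured tx power values and the three
         * template MAC addresses */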
        if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
                a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
                if (!a)
                        goto out;

                for (i = 0; i < ARRAY_SIZE(td->tx_power); i++)
                        if (nla_put_u8(msg, i, td->tx_power[i]))
                                goto out;

                nla_nest_end(msg, a);
        }

        if (mt76_testmode_param_present(td, MT76_TM_ATTR_MAC_ADDRS)) {
                a = nla_nest_start(msg, MT76_TM_ATTR_MAC_ADDRS);
                if (!a)
                        goto out;

                for (i = 0; i < 3; i++)
                        if (nla_put(msg, i, ETH_ALEN, td->addr[i]))
                                goto out;

                nla_nest_end(msg, a);
        }

        err = 0;

out:
        mutex_unlock(&dev->mutex);

        return err;
}
EXPORT_SYMBOL(mt76_testmode_dump);
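
/*
 * A minimal sketch of how a driver plugs into this interface
 * (illustrative only: the "foo" names are hypothetical, the callback
 * layout follows the dev->test_ops calls above):
 *
 *      static const struct mt76_testmode_ops foo_testmode_ops = {
 *              .set_state = foo_tm_set_state,
 *              .set_params = foo_tm_set_params,
 *              .dump_stats = foo_tm_dump_stats,
 *      };
 *
 * and, at device init time:
 *
 *      dev->test_ops = &foo_testmode_ops;
 */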