cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

peer.c (14645B)


      1// SPDX-License-Identifier: BSD-3-Clause-Clear
      2/*
      3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
      4 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
      5 */
      6
      7#include "core.h"
      8#include "peer.h"
      9#include "debug.h"
     10
     11static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
     12						       int peer_id)
     13{
     14	struct ath11k_peer *peer;
     15
     16	lockdep_assert_held(&ab->base_lock);
     17
     18	list_for_each_entry(peer, &ab->peers, list) {
     19		if (peer->peer_id != peer_id)
     20			continue;
     21
     22		return peer;
     23	}
     24
     25	return NULL;
     26}
     27
     28struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
     29				     const u8 *addr)
     30{
     31	struct ath11k_peer *peer;
     32
     33	lockdep_assert_held(&ab->base_lock);
     34
     35	list_for_each_entry(peer, &ab->peers, list) {
     36		if (peer->vdev_id != vdev_id)
     37			continue;
     38		if (!ether_addr_equal(peer->addr, addr))
     39			continue;
     40
     41		return peer;
     42	}
     43
     44	return NULL;
     45}
     46
     47struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
     48					     const u8 *addr)
     49{
     50	struct ath11k_peer *peer;
     51
     52	lockdep_assert_held(&ab->base_lock);
     53
     54	if (!ab->rhead_peer_addr)
     55		return NULL;
     56
     57	peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
     58				      ab->rhash_peer_addr_param);
     59
     60	return peer;
     61}
     62
     63struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
     64					   int peer_id)
     65{
     66	struct ath11k_peer *peer;
     67
     68	lockdep_assert_held(&ab->base_lock);
     69
     70	if (!ab->rhead_peer_id)
     71		return NULL;
     72
     73	peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
     74				      ab->rhash_peer_id_param);
     75
     76	return peer;
     77}
     78
     79struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
     80						int vdev_id)
     81{
     82	struct ath11k_peer *peer;
     83
     84	spin_lock_bh(&ab->base_lock);
     85
     86	list_for_each_entry(peer, &ab->peers, list) {
     87		if (vdev_id == peer->vdev_id) {
     88			spin_unlock_bh(&ab->base_lock);
     89			return peer;
     90		}
     91	}
     92	spin_unlock_bh(&ab->base_lock);
     93	return NULL;
     94}
     95
     96void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
     97{
     98	struct ath11k_peer *peer;
     99
    100	spin_lock_bh(&ab->base_lock);
    101
    102	peer = ath11k_peer_find_list_by_id(ab, peer_id);
    103	if (!peer) {
    104		ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
    105			    peer_id);
    106		goto exit;
    107	}
    108
    109	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
    110		   peer->vdev_id, peer->addr, peer_id);
    111
    112	list_del(&peer->list);
    113	kfree(peer);
    114	wake_up(&ab->peer_mapping_wq);
    115
    116exit:
    117	spin_unlock_bh(&ab->base_lock);
    118}
    119
    120void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
    121			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
    122{
    123	struct ath11k_peer *peer;
    124
    125	spin_lock_bh(&ab->base_lock);
    126	peer = ath11k_peer_find(ab, vdev_id, mac_addr);
    127	if (!peer) {
    128		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
    129		if (!peer)
    130			goto exit;
    131
    132		peer->vdev_id = vdev_id;
    133		peer->peer_id = peer_id;
    134		peer->ast_hash = ast_hash;
    135		peer->hw_peer_id = hw_peer_id;
    136		ether_addr_copy(peer->addr, mac_addr);
    137		list_add(&peer->list, &ab->peers);
    138		wake_up(&ab->peer_mapping_wq);
    139	}
    140
    141	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
    142		   vdev_id, mac_addr, peer_id);
    143
    144exit:
    145	spin_unlock_bh(&ab->base_lock);
    146}
    147
/* Wait (up to 3 s) for the peer's map state to reach @expect_mapped:
 * true  -> wait until the peer appears in ab->peers,
 * false -> wait until it is gone.
 * The wait also ends early if ATH11K_FLAG_CRASH_FLUSH is set, so a
 * crashed firmware does not leave callers stuck for the full timeout.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	/* The condition is a GCC statement expression: it re-evaluates the
	 * peer lookup under base_lock on every wakeup from peer_mapping_wq.
	 */
	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath11k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	/* wait_event_timeout() returns 0 on timeout, >0 otherwise. */
	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
    169
    170static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
    171					   struct rhashtable *rtbl,
    172					   struct rhash_head *rhead,
    173					   struct rhashtable_params *params,
    174					   void *key)
    175{
    176	struct ath11k_peer *tmp;
    177
    178	lockdep_assert_held(&ab->tbl_mtx_lock);
    179
    180	tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
    181
    182	if (!tmp)
    183		return 0;
    184	else if (IS_ERR(tmp))
    185		return PTR_ERR(tmp);
    186	else
    187		return -EEXIST;
    188}
    189
    190static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
    191					   struct rhashtable *rtbl,
    192					   struct rhash_head *rhead,
    193					   struct rhashtable_params *params)
    194{
    195	int ret;
    196
    197	lockdep_assert_held(&ab->tbl_mtx_lock);
    198
    199	ret = rhashtable_remove_fast(rtbl, rhead, *params);
    200	if (ret && ret != -ENOENT)
    201		return ret;
    202
    203	return 0;
    204}
    205
/* Add @peer to both lookup tables (by id, then by address).
 * If the second insert fails, the first is rolled back so the peer is
 * either in both tables or in neither.
 * Caller must hold ab->base_lock AND ab->tbl_mtx_lock.
 * Returns 0, -EPERM if the tables are not initialized, or an insert error.
 */
static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
{
	int ret;

	lockdep_assert_held(&ab->base_lock);
	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
		return -EPERM;

	ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
				       &ab->rhash_peer_id_param, &peer->peer_id);
	if (ret) {
		ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
			    peer->addr, peer->peer_id, ret);
		return ret;
	}

	ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
				       &ab->rhash_peer_addr_param, &peer->addr);
	if (ret) {
		ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
			    peer->addr, peer->peer_id, ret);
		goto err_clean;
	}

	return 0;

err_clean:
	/* Undo the id-table insert so both tables stay consistent. */
	ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
				 &ab->rhash_peer_id_param);
	return ret;
}
    239
/* Remove and free every peer still attached to @vdev_id. Normally peers
 * are deleted individually; any found here are stale, hence the warning.
 * Lock order: tbl_mtx_lock (mutex) before base_lock (BH spinlock),
 * matching the rest of this file. Uses the _safe iterator because
 * entries are unlinked while walking.
 */
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
	struct ath11k_peer *peer, *tmp;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		/* Drop from both rhash tables before freeing. */
		ath11k_peer_rhash_delete(ab, peer);
		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);
}
    265
    266static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
    267{
    268	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
    269}
    270
    271int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
    272				     const u8 *addr)
    273{
    274	int ret;
    275	unsigned long time_left;
    276
    277	ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
    278	if (ret) {
    279		ath11k_warn(ar->ab, "failed wait for peer deleted");
    280		return ret;
    281	}
    282
    283	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
    284						3 * HZ);
    285	if (time_left == 0) {
    286		ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
    287		return -ETIMEDOUT;
    288	}
    289
    290	return 0;
    291}
    292
/* Core peer delete: unhash the peer, send the WMI delete command, then
 * wait for both the unmap event and the firmware's delete response.
 * The list entry itself is freed by ath11k_peer_unmap_event() when the
 * firmware unmaps the peer. Caller must hold ar->conf_mutex.
 * Returns 0, -EINVAL if no peer with @addr exists, or a WMI/wait error.
 */
static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;
	struct ath11k_peer *peer;
	struct ath11k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	/* tbl_mtx_lock before base_lock: required for rhash_delete below. */
	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);

	/* NOTE(review): lookup is by address only; @vdev_id is not checked
	 * here — presumably addresses are unique across vdevs. Verify.
	 */
	peer = ath11k_peer_find_by_addr(ab, addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		mutex_unlock(&ab->tbl_mtx_lock);

		ath11k_warn(ab,
			    "failed to find peer vdev_id %d addr %pM in delete\n",
			    vdev_id, addr);
		return -EINVAL;
	}

	/* Remove from both hash tables before telling firmware, so no new
	 * lookups can race with the teardown.
	 */
	ath11k_peer_rhash_delete(ab, peer);

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);

	/* Re-arm before sending the command to avoid missing the response. */
	reinit_completion(&ar->peer_delete_done);

	ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	return 0;
}
    336
    337int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
    338{
    339	int ret;
    340
    341	lockdep_assert_held(&ar->conf_mutex);
    342
    343	ret = __ath11k_peer_delete(ar, vdev_id, addr);
    344	if (ret)
    345		return ret;
    346
    347	ar->num_peers--;
    348
    349	return 0;
    350}
    351
    352static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
    353{
    354	return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
    355}
    356
/* Create a firmware peer and register it in the driver's lookup tables.
 * Flow: capacity check -> duplicate-address check -> WMI create command
 * -> wait for the map event -> hash-table insert -> per-peer init.
 * On any failure after the WMI command succeeded, the peer is torn down
 * again via __ath11k_peer_delete(). Caller must hold ar->conf_mutex.
 * Returns 0, -ENOBUFS (no peer slots), -EINVAL (duplicate address),
 * -ENOENT (map event raced away), or a WMI/rhash error.
 */
int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
		       struct ieee80211_sta *sta, struct peer_create_params *param)
{
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	int ret, fbret;

	lockdep_assert_held(&ar->conf_mutex);

	/* Firmware has a fixed peer budget; refuse when full. */
	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath11k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	/* Reject a second peer with the same MAC address. */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
	if (peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath11k_wmi_send_peer_create_cmd(ar, param);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    param->vdev_id, ret);
		return ret;
	}

	/* The peer list entry is created by the HTT map event handler. */
	ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
					   param->peer_addr);
	if (ret)
		return ret;

	mutex_lock(&ar->ab->tbl_mtx_lock);
	spin_lock_bh(&ar->ab->base_lock);

	/* Re-look up under the lock; the wait only sampled the state. */
	peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    param->peer_addr, param->vdev_id);

		ret = -ENOENT;
		goto cleanup;
	}

	ret = ath11k_peer_rhash_add(ar->ab, peer);
	if (ret) {
		spin_unlock_bh(&ar->ab->base_lock);
		mutex_unlock(&ar->ab->tbl_mtx_lock);
		goto cleanup;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
		/* Cache AST details on the vif for STA-mode data path use. */
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	/* New peers start unencrypted until keys are installed. */
	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	if (sta) {
		arsta = (struct ath11k_sta *)sta->drv_priv;
		/* Seed TX classifier metadata with this peer's id. */
		arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
				       FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
						  peer->peer_id);

		/* set HTT extension valid bit to 0 by default */
		arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
	}

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);
	mutex_unlock(&ar->ab->tbl_mtx_lock);

	return 0;

cleanup:
	/* Best-effort rollback of the firmware-side peer; keep the original
	 * error (ret), only log the fallback failure.
	 */
	fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
	if (fbret)
		ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
			    param->peer_addr, param->vdev_id, fbret);

	return ret;
}
    450
    451int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
    452{
    453	int ret;
    454
    455	lockdep_assert_held(&ab->base_lock);
    456	lockdep_assert_held(&ab->tbl_mtx_lock);
    457
    458	if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
    459		return -EPERM;
    460
    461	ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
    462				       &ab->rhash_peer_addr_param);
    463	if (ret) {
    464		ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
    465			    peer->addr, peer->peer_id, ret);
    466		return ret;
    467	}
    468
    469	ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
    470				       &ab->rhash_peer_id_param);
    471	if (ret) {
    472		ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
    473			    peer->addr, peer->peer_id, ret);
    474		return ret;
    475	}
    476
    477	return 0;
    478}
    479
/* Allocate and publish the peer-id rhashtable. Idempotent: returns 0 if
 * the table already exists. Caller must hold ab->tbl_mtx_lock.
 */
static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
{
	struct rhashtable_params *param;
	struct rhashtable *rhash_id_tbl;
	int ret;
	size_t size;

	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (ab->rhead_peer_id)
		return 0;

	size = sizeof(*ab->rhead_peer_id);
	rhash_id_tbl = kzalloc(size, GFP_KERNEL);
	if (!rhash_id_tbl) {
		ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
			    size);
		return -ENOMEM;
	}

	param = &ab->rhash_peer_id_param;

	/* Key is the integer peer_id field embedded in struct ath11k_peer. */
	param->key_offset = offsetof(struct ath11k_peer, peer_id);
	param->head_offset = offsetof(struct ath11k_peer, rhash_id);
	param->key_len = sizeof_field(struct ath11k_peer, peer_id);
	param->automatic_shrinking = true;
	/* Size hint: worst-case peer count across all radios. */
	param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);

	ret = rhashtable_init(rhash_id_tbl, param);
	if (ret) {
		ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
		goto err_free;
	}

	spin_lock_bh(&ab->base_lock);

	/* Publish under base_lock; if another path won the race and already
	 * installed a table, discard ours.
	 */
	if (!ab->rhead_peer_id) {
		ab->rhead_peer_id = rhash_id_tbl;
	} else {
		spin_unlock_bh(&ab->base_lock);
		goto cleanup_tbl;
	}

	spin_unlock_bh(&ab->base_lock);

	return 0;

cleanup_tbl:
	rhashtable_destroy(rhash_id_tbl);
err_free:
	kfree(rhash_id_tbl);

	return ret;
}
    534
/* Allocate and publish the peer-address rhashtable. Idempotent: returns 0
 * if the table already exists. Caller must hold ab->tbl_mtx_lock.
 * Mirrors ath11k_peer_rhash_id_tbl_init() but keys on the MAC address.
 */
static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
{
	struct rhashtable_params *param;
	struct rhashtable *rhash_addr_tbl;
	int ret;
	size_t size;

	lockdep_assert_held(&ab->tbl_mtx_lock);

	if (ab->rhead_peer_addr)
		return 0;

	size = sizeof(*ab->rhead_peer_addr);
	rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
	if (!rhash_addr_tbl) {
		ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
			    size);
		return -ENOMEM;
	}

	param = &ab->rhash_peer_addr_param;

	/* Key is the MAC address field embedded in struct ath11k_peer. */
	param->key_offset = offsetof(struct ath11k_peer, addr);
	param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
	param->key_len = sizeof_field(struct ath11k_peer, addr);
	param->automatic_shrinking = true;
	/* Size hint: worst-case peer count across all radios. */
	param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);

	ret = rhashtable_init(rhash_addr_tbl, param);
	if (ret) {
		ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_free;
	}

	spin_lock_bh(&ab->base_lock);

	/* Publish under base_lock; if another path won the race and already
	 * installed a table, discard ours.
	 */
	if (!ab->rhead_peer_addr) {
		ab->rhead_peer_addr = rhash_addr_tbl;
	} else {
		spin_unlock_bh(&ab->base_lock);
		goto cleanup_tbl;
	}

	spin_unlock_bh(&ab->base_lock);

	return 0;

cleanup_tbl:
	rhashtable_destroy(rhash_addr_tbl);
err_free:
	kfree(rhash_addr_tbl);

	return ret;
}
    589
    590static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
    591{
    592	lockdep_assert_held(&ab->tbl_mtx_lock);
    593
    594	if (!ab->rhead_peer_id)
    595		return;
    596
    597	rhashtable_destroy(ab->rhead_peer_id);
    598	kfree(ab->rhead_peer_id);
    599	ab->rhead_peer_id = NULL;
    600}
    601
    602static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
    603{
    604	lockdep_assert_held(&ab->tbl_mtx_lock);
    605
    606	if (!ab->rhead_peer_addr)
    607		return;
    608
    609	rhashtable_destroy(ab->rhead_peer_addr);
    610	kfree(ab->rhead_peer_addr);
    611	ab->rhead_peer_addr = NULL;
    612}
    613
    614int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
    615{
    616	int ret;
    617
    618	mutex_lock(&ab->tbl_mtx_lock);
    619
    620	ret = ath11k_peer_rhash_id_tbl_init(ab);
    621	if (ret)
    622		goto out;
    623
    624	ret = ath11k_peer_rhash_addr_tbl_init(ab);
    625	if (ret)
    626		goto cleanup_tbl;
    627
    628	mutex_unlock(&ab->tbl_mtx_lock);
    629
    630	return 0;
    631
    632cleanup_tbl:
    633	ath11k_peer_rhash_id_tbl_destroy(ab);
    634out:
    635	mutex_unlock(&ab->tbl_mtx_lock);
    636	return ret;
    637}
    638
    639void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
    640{
    641	mutex_lock(&ab->tbl_mtx_lock);
    642
    643	ath11k_peer_rhash_addr_tbl_destroy(ab);
    644	ath11k_peer_rhash_id_tbl_destroy(ab);
    645
    646	mutex_unlock(&ab->tbl_mtx_lock);
    647}