cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dbring.c (9613B)


// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf

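/* Scan a completed buffer one u32 at a time; if any word still holds
 * ATH11K_DB_MAGIC_VALUE, the device did not fully overwrite the prefilled
 * pattern and the buffer is rejected. size is given in bytes.
 */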
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
		if (*temp == ATH11K_DB_MAGIC_VALUE)
			return -EINVAL;
	}

	return 0;
}

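/* Prefill a buffer with ATH11K_DB_MAGIC_VALUE so that
 * ath11k_dbring_validate_buffer() can later detect whether the device
 * actually wrote into it. size is given in bytes.
 */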
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
					   void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	/* temp advances once per iteration in the loop header, so each
	 * u32 in the buffer is written exactly once; a second increment
	 * here would skip every other word and run past the buffer end.
	 */
	for (idx = 0, temp = buffer; idx < size; idx++, temp++)
		*temp = ATH11K_DB_MAGIC_VALUE;
}

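/* Return one element to the hardware: prefill it with the magic pattern,
 * DMA-map it, track it in the ring's IDR, and post its address to the
 * refill SRNG with a cookie encoding the pdev index and buffer id.
 * The caller must hold srng->lock.
 */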
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff,
					enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}

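/* Allocate and post as many buffers as the refill SRNG has free entries,
 * capped at bufs_max. Each payload is over-allocated by buf_align - 1
 * bytes so the aligned pointer still covers buf_sz bytes. Returns the
 * number of entries that could not be filled.
 */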
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}
		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}

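/* Describe the DMA ring to the firmware over WMI: base, head and tail
 * pointer addresses, element count, buffer size and the event moderation
 * parameters.
 */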
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id		= DP_SW2HW_MACID(ring->pdev_id);
	param.module_id		= id;
	param.base_paddr_lo	= lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi	= upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo	= lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems		= ring->bufs_max;
	param.buf_size		= ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms	= ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}

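/* Record the event moderation parameters and the per-module callback that
 * ath11k_dbring_buffer_release_event() invokes for each released buffer.
 */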
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

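/* Derive the ring geometry from the refill SRNG and the per-module
 * capabilities, record the head/tail pointer addresses, then fill the
 * ring with buffers.
 */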
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);

	return ret;
}

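/* Allocate the HAL refill SRNG that backs this direct buffer ring. */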
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

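/* Look up the direct-buffer capability the firmware advertised for the
 * given pdev and module id.
 */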
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}

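/* WMI handler for a DMA buffer release event: for each released entry,
 * look up the buffer by its cookie, unmap it, hand the aligned payload to
 * the module handler (e.g. spectral), then clear the buffer and post it
 * back to the ring.
 */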
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

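/* Release the HAL SRNG backing the ring. */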
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

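/* Unmap and free every buffer still tracked by the ring, then destroy
 * the IDR itself.
 */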
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}