cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sdio.c (14188B)


// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
#include "sdio.h"

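/* Read the WHISR host interrupt status register; used as the poll
 * target for readx_poll_timeout() in the mailbox helpers below.
 */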
static u32 mt76s_read_whisr(struct mt76_dev *dev)
{
	return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}

u32 mt76s_read_pcr(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
EXPORT_SYMBOL_GPL(mt76s_read_pcr);

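/* Read a device register through the host-to-device mailbox: post the
 * target offset to H2DSM0R, raise a software interrupt, poll WHISR
 * until the device acknowledges, then fetch the value from D2HRM1R.
 * Returns ~0 on any failure.
 */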
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}

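/* Write a device register through the mailbox: post the offset to
 * H2DSM0R and the value to H2DSM1R, raise the write software
 * interrupt, then poll WHISR and read back the offset to confirm the
 * write completed.
 */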
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}

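/* Generic register accessors: once the MCU is running, access is
 * routed through the MCU ops; before that, the mailbox protocol above
 * is used directly.
 */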
u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rr(dev, offset);
	else
		return mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);

void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		dev->mcu_ops->mcu_wr(dev, offset, val);
	else
		mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);

u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76s_rr(dev, offset) & ~mask;
	mt76s_wr(dev, offset, val);

	return val;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);

void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len)
{
	const u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		mt76s_wr(dev, offset, val[i]);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);

void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		val[i] = mt76s_rr(dev, offset);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);

int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len)
{
	int i;

	for (i = 0; i < len; i++) {
		mt76s_wr(dev, data->reg, data->value);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);

int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		data->value = mt76s_rr(dev, data->reg);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);

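/* One-time SDIO bring-up: enable the function, request driver
 * ownership via WHLPCR, set a 512-byte block size, unmask the RX/TX
 * done interrupts (plus RX1 on CONNAC2), configure WHCR interrupt
 * clearing and RX aggregation for the hardware revision, and claim
 * the SDIO IRQ. Errors unwind through sdio_disable_func().
 */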
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);

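/* RX and TX queues are simple ring buffers of mt76_queue_entry,
 * indexed modulo ndesc via head/tail.
 */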
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT76S_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);

static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
	struct mt76_queue *q;

	q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return ERR_PTR(-ENOMEM);

	q->ndesc = MT76S_NUM_TX_ENTRIES;

	return q;
}

int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		q->qid = i;
		dev->phy.q_tx[i] = q;
	}

	q = mt76s_alloc_tx_queue(dev);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = MT_MCUQ_WM;
	dev->q_mcu[MT_MCUQ_WM] = q;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_tx);

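/* RX path: pop completed entries off a queue under its lock and hand
 * each skb to the driver; the net worker below loops over all RX
 * queues until no more frames are pending.
 */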
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}

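/* TX completion: reap entries the txrx worker has flagged done. For
 * the MCU queue the skb is freed here and cleared before
 * mt76_queue_tx_complete() runs; data frames keep their skb so TX
 * status reporting can process it.
 */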
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}

static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		if (ndata_frames > 0)
			resched = true;

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
			queue_work(dev->wq, &dev->sdio.stat_work);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->sdio.txrx_worker);
}

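/* Work item that drains pending TX status from the driver; it
 * requeues itself while the device is running and otherwise clears
 * MT76_READING_STATS so the status worker may schedule it again.
 */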
static void mt76s_tx_status_data(struct work_struct *work)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(work, struct mt76_sdio, stat_work);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &sdio->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

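/* Enqueue a data frame: the driver callback builds the TX descriptor,
 * then the skb is recorded in the ring. The actual SDIO transfer is
 * performed later by the txrx worker, kicked via mt76s_tx_kick().
 */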
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}

static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);

	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

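/* Driver-side setup: start the status and net workers, register the
 * SDIO queue/bus ops, and size the TX bounce buffer from the host
 * controller's max_req_size and block-count limits.
 */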
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	u32 host_max_cap;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	host_max_cap = min_t(u32, func->card->host->max_req_size,
			     func->cur_blksize *
			     func->card->host->max_blk_count);
	dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
	dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
					  GFP_KERNEL);
	if (!dev->sdio.xmit_buf)
		err = -ENOMEM;

	return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");