cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

jz4740_mmc.c (29940B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
      4 *  Copyright (C) 2013, Imagination Technologies
      5 *
      6 *  JZ4740 SD/MMC controller driver
      7 */
      8
      9#include <linux/bitops.h>
     10#include <linux/clk.h>
     11#include <linux/delay.h>
     12#include <linux/dmaengine.h>
     13#include <linux/dma-mapping.h>
     14#include <linux/err.h>
     15#include <linux/interrupt.h>
     16#include <linux/io.h>
     17#include <linux/irq.h>
     18#include <linux/mmc/host.h>
     19#include <linux/mmc/slot-gpio.h>
     20#include <linux/module.h>
     21#include <linux/of_device.h>
     22#include <linux/pinctrl/consumer.h>
     23#include <linux/platform_device.h>
     24#include <linux/scatterlist.h>
     25
     26#include <asm/cacheflush.h>
     27
/* Register offsets from the controller's MMIO base. */
#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_LPM		0x40
#define JZ_REG_MMC_DMAC		0x44

/* STRPCL register bits (start/stop clock and operations). */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
#define JZ_MMC_STRPCL_RESET BIT(3)
#define JZ_MMC_STRPCL_START_OP BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START BIT(1)


/* STATUS register bits. */
#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
#define JZ_MMC_STATUS_PRG_DONE BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
#define JZ_MMC_STATUS_CLK_EN BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)

/* Aggregate error masks checked after a data transfer. */
#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))


/* CMDAT register bits (command/data transfer control). */
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
#define	JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
#define JZ_MMC_CMDAT_STREAM BIT(5)
#define JZ_MMC_CMDAT_WRITE BIT(4)
#define JZ_MMC_CMDAT_DATA_EN BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1 1
#define JZ_MMC_CMDAT_RSP_R2 2
#define JZ_MMC_CMDAT_RSP_R3 3

/* IMASK/IREG interrupt bits. */
#define JZ_MMC_IRQ_SDIO BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
#define JZ_MMC_IRQ_PRG_DONE BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)

/* DMAC register bits (used on JZ4780 and newer, see send_command). */
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)

/* LPM register bits (low-power mode / drive and sample delays). */
#define	JZ_MMC_LPM_DRV_RISING BIT(31)
#define	JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
#define	JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
#define	JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
#define	JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)

/* Controller base clock rate and software request timeout. */
#define JZ_MMC_CLK_RATE 24000000
#define JZ_MMC_REQ_TIMEOUT_MS 5000
    112
/*
 * Supported SoC generations, ordered so feature checks can use >=
 * comparisons (e.g. host->version >= JZ_MMC_JZ4780).
 */
enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};

/* Per-request state machine steps, advanced by jz_mmc_irq_worker(). */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};
    127
/*
 * The MMC core allows to prepare a mmc_request while another mmc_request
 * is in-flight. This is used via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called.
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};
    146
/* Driver-private host state, allocated together with the mmc_host. */
struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;

	int irq;

	void __iomem *base;		/* mapped controller registers */
	struct resource *mem_res;	/* physical register range; its start
					 * address is used to build the DMA
					 * FIFO addresses */
	struct mmc_request *req;	/* request currently being processed */
	struct mmc_command *cmd;	/* command currently on the bus */

	unsigned long waiting;		/* bit 0 set while an IRQ or the
					 * timeout timer is awaited; cleared
					 * by whichever fires first */

	uint32_t cmdat;			/* sticky CMDAT bits (bus width; INIT
					 * is consumed by the next command) */

	uint32_t irq_mask;		/* software shadow of IMASK */

	spinlock_t lock;		/* serializes irq_mask updates */

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;	/* scatterlist iterator for PIO */
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;	/* NULL when a shared tx-rx channel
					 * is used (see get_dma_chan) */
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read
 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 * trigger is when data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};
    184
    185static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
    186				      uint32_t val)
    187{
    188	if (host->version >= JZ_MMC_JZ4725B)
    189		return writel(val, host->base + JZ_REG_MMC_IMASK);
    190	else
    191		return writew(val, host->base + JZ_REG_MMC_IMASK);
    192}
    193
    194static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
    195				     uint32_t val)
    196{
    197	if (host->version >= JZ_MMC_JZ4780)
    198		writel(val, host->base + JZ_REG_MMC_IREG);
    199	else
    200		writew(val, host->base + JZ_REG_MMC_IREG);
    201}
    202
    203static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
    204{
    205	if (host->version >= JZ_MMC_JZ4780)
    206		return readl(host->base + JZ_REG_MMC_IREG);
    207	else
    208		return readw(host->base + JZ_REG_MMC_IREG);
    209}
    210
    211/*----------------------------------------------------------------------------*/
    212/* DMA infrastructure */
    213
    214static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
    215{
    216	if (!host->use_dma)
    217		return;
    218
    219	dma_release_channel(host->dma_tx);
    220	if (host->dma_rx)
    221		dma_release_channel(host->dma_rx);
    222}
    223
    224static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
    225{
    226	struct device *dev = mmc_dev(host->mmc);
    227
    228	host->dma_tx = dma_request_chan(dev, "tx-rx");
    229	if (!IS_ERR(host->dma_tx))
    230		return 0;
    231
    232	if (PTR_ERR(host->dma_tx) != -ENODEV) {
    233		dev_err(dev, "Failed to get dma tx-rx channel\n");
    234		return PTR_ERR(host->dma_tx);
    235	}
    236
    237	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
    238	if (IS_ERR(host->dma_tx)) {
    239		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
    240		return PTR_ERR(host->dma_tx);
    241	}
    242
    243	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
    244	if (IS_ERR(host->dma_rx)) {
    245		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
    246		dma_release_channel(host->dma_tx);
    247		return PTR_ERR(host->dma_rx);
    248	}
    249
    250	/*
    251	 * Limit the maximum segment size in any SG entry according to
    252	 * the parameters of the DMA engine device.
    253	 */
    254	if (host->dma_tx) {
    255		struct device *dev = host->dma_tx->device->dev;
    256		unsigned int max_seg_size = dma_get_max_seg_size(dev);
    257
    258		if (max_seg_size < host->mmc->max_seg_size)
    259			host->mmc->max_seg_size = max_seg_size;
    260	}
    261
    262	if (host->dma_rx) {
    263		struct device *dev = host->dma_rx->device->dev;
    264		unsigned int max_seg_size = dma_get_max_seg_size(dev);
    265
    266		if (max_seg_size < host->mmc->max_seg_size)
    267			host->mmc->max_seg_size = max_seg_size;
    268	}
    269
    270	return 0;
    271}
    272
    273static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
    274						       struct mmc_data *data)
    275{
    276	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
    277		return host->dma_rx;
    278	else
    279		return host->dma_tx;
    280}
    281
    282static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
    283				 struct mmc_data *data)
    284{
    285	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
    286	enum dma_data_direction dir = mmc_get_dma_dir(data);
    287
    288	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
    289	data->host_cookie = COOKIE_UNMAPPED;
    290}
    291
    292/* Prepares DMA data for current or next transfer.
    293 * A request can be in-flight when this is called.
    294 */
    295static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
    296				       struct mmc_data *data,
    297				       int cookie)
    298{
    299	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
    300	enum dma_data_direction dir = mmc_get_dma_dir(data);
    301	int sg_count;
    302
    303	if (data->host_cookie == COOKIE_PREMAPPED)
    304		return data->sg_count;
    305
    306	sg_count = dma_map_sg(chan->device->dev,
    307			data->sg,
    308			data->sg_len,
    309			dir);
    310
    311	if (sg_count <= 0) {
    312		dev_err(mmc_dev(host->mmc),
    313			"Failed to map scatterlist for DMA operation\n");
    314		return -EINVAL;
    315	}
    316
    317	data->sg_count = sg_count;
    318	data->host_cookie = cookie;
    319
    320	return data->sg_count;
    321}
    322
/*
 * Configure the DMA channel for this transfer, map the scatterlist if
 * not premapped, and submit the transfer to the DMA engine.
 *
 * Returns 0 on success, or a negative errno if the mapping or the
 * descriptor preparation failed.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	/* Point the engine at the physical address of the data FIFO. */
	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	/* Map here with COOKIE_MAPPED unless pre_req already mapped it. */
	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
			conf.direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Premapped requests are unmapped in post_req instead. */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}
    369
    370static void jz4740_mmc_pre_request(struct mmc_host *mmc,
    371				   struct mmc_request *mrq)
    372{
    373	struct jz4740_mmc_host *host = mmc_priv(mmc);
    374	struct mmc_data *data = mrq->data;
    375
    376	if (!host->use_dma)
    377		return;
    378
    379	data->host_cookie = COOKIE_UNMAPPED;
    380	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
    381		data->host_cookie = COOKIE_UNMAPPED;
    382}
    383
    384static void jz4740_mmc_post_request(struct mmc_host *mmc,
    385				    struct mmc_request *mrq,
    386				    int err)
    387{
    388	struct jz4740_mmc_host *host = mmc_priv(mmc);
    389	struct mmc_data *data = mrq->data;
    390
    391	if (data && data->host_cookie != COOKIE_UNMAPPED)
    392		jz4740_mmc_dma_unmap(host, data);
    393
    394	if (err) {
    395		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
    396
    397		dmaengine_terminate_all(chan);
    398	}
    399}
    400
    401/*----------------------------------------------------------------------------*/
    402
    403static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
    404	unsigned int irq, bool enabled)
    405{
    406	unsigned long flags;
    407
    408	spin_lock_irqsave(&host->lock, flags);
    409	if (enabled)
    410		host->irq_mask &= ~irq;
    411	else
    412		host->irq_mask |= irq;
    413
    414	jz4740_mmc_write_irq_mask(host, host->irq_mask);
    415	spin_unlock_irqrestore(&host->lock, flags);
    416}
    417
    418static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
    419	bool start_transfer)
    420{
    421	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
    422
    423	if (start_transfer)
    424		val |= JZ_MMC_STRPCL_START_OP;
    425
    426	writew(val, host->base + JZ_REG_MMC_STRPCL);
    427}
    428
    429static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
    430{
    431	uint32_t status;
    432	unsigned int timeout = 1000;
    433
    434	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
    435	do {
    436		status = readl(host->base + JZ_REG_MMC_STATUS);
    437	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
    438}
    439
    440static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
    441{
    442	uint32_t status;
    443	unsigned int timeout = 1000;
    444
    445	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
    446	udelay(10);
    447	do {
    448		status = readl(host->base + JZ_REG_MMC_STATUS);
    449	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
    450}
    451
    452static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
    453{
    454	struct mmc_request *req;
    455	struct mmc_data *data;
    456
    457	req = host->req;
    458	data = req->data;
    459	host->req = NULL;
    460
    461	if (data && data->host_cookie == COOKIE_MAPPED)
    462		jz4740_mmc_dma_unmap(host, data);
    463	mmc_request_done(host->mmc, req);
    464}
    465
/*
 * Busy-poll the interrupt flag register for @irq a bounded number of
 * times.
 *
 * Returns false when the flag was seen. Returns true if polling gave
 * up; in that case the "waiting" bit is set, the timeout timer is
 * re-armed and @irq is unmasked, so completion continues from the
 * interrupt path (or fails via jz4740_mmc_timeout()).
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
	unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		/* Hand over to interrupt-driven completion. */
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}
    486
    487static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
    488	struct mmc_data *data)
    489{
    490	int status;
    491
    492	status = readl(host->base + JZ_REG_MMC_STATUS);
    493	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
    494		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
    495			host->req->cmd->error = -ETIMEDOUT;
    496			data->error = -ETIMEDOUT;
    497		} else {
    498			host->req->cmd->error = -EIO;
    499			data->error = -EIO;
    500		}
    501	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
    502		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
    503			host->req->cmd->error = -ETIMEDOUT;
    504			data->error = -ETIMEDOUT;
    505		} else {
    506			host->req->cmd->error = -EIO;
    507			data->error = -EIO;
    508		}
    509	}
    510}
    511
/*
 * PIO write path: feed the TX FIFO from the request's scatterlist.
 *
 * Data is pushed in bursts of 8 words (the FIFO trigger level), waiting
 * for TXFIFO_WR_REQ before each burst. Returns false when all data was
 * written; returns true when polling for FIFO space gave up, in which
 * case partial progress is recorded and the transfer resumes from the
 * interrupt path.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
	struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* j = full 8-word bursts, i = leftover words (< 8) */
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	/* Record how far we got so the transfer can be resumed later. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
    566
/*
 * PIO read path: drain the RX FIFO into the request's scatterlist.
 *
 * Data is pulled in 32-byte (8-word) bursts after waiting for
 * RXFIFO_RD_REQ. Returns false when all data was read; returns true
 * when polling gave up, in which case partial progress is recorded and
 * the transfer resumes from the interrupt path.
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* j = full 32-byte bursts, i = leftover bytes (< 32) */
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			/* Final 1-3 bytes: read one word, copy what's needed. */
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometimes one word more in the FIFO
	 * than requested; drain it (bounded) so it cannot leak into the
	 * next transfer. */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	/* Record how far we got so the transfer can be resumed later. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
    637
/*
 * Software timeout handler (timeout_timer): fires when the expected
 * interrupt never arrived; fails the current command with -ETIMEDOUT.
 */
static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	/* The hard IRQ handler clears this bit first when it wins the race. */
	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}
    650
/*
 * Read the command response out of the response FIFO, which delivers
 * 16-bit words, and reassemble it into the 32-bit resp[] layout the
 * MMC core expects.
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		/*
		 * Long (136-bit) response: each resp[] word is built from
		 * the top byte of one FIFO half-word plus the following
		 * half-word and the top byte of the next, carrying one
		 * half-word over between iterations.
		 */
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		/* Short response: 32 payload bits from three half-words. */
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}
    673
/*
 * Program and issue a single command.
 *
 * The card clock is stopped while the command registers are written and
 * restarted (with START_OP) once everything is set up. The one-shot
 * JZ_MMC_CMDAT_INIT flag in host->cmdat is consumed here.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	/* INIT is only sent with the first command after power-up. */
	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	/* Map the core's response type onto the controller's format field. */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The JZ4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			/* PIO transfer: make sure DMA is off. */
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}
    736
    737static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
    738{
    739	struct mmc_command *cmd = host->req->cmd;
    740	struct mmc_data *data = cmd->data;
    741	int direction;
    742
    743	if (data->flags & MMC_DATA_READ)
    744		direction = SG_MITER_TO_SG;
    745	else
    746		direction = SG_MITER_FROM_SG;
    747
    748	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
    749}
    750
    751
/*
 * Threaded IRQ handler: advances the per-request state machine in
 * host->state. When a poll inside a step gives up, the state to resume
 * from is saved and the function breaks out without completing the
 * request; the next interrupt (or the timeout timer) picks it up again.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	/* Errors recorded by the hard IRQ skip straight to completion. */
	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			/* Resume the data phase on the next interrupt. */
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		/* For busy-signalling stop commands, wait for PRG_DONE. */
		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		fallthrough;

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	/* timeout == true means a later interrupt finishes the request. */
	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}
    834
/*
 * Hard IRQ handler: acknowledges flags that need no thread-level work,
 * signals SDIO card interrupts, records command errors seen in the
 * status register, and wakes the threaded handler for the rest.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	/* Only act on interrupts that are currently unmasked. */
	irq_reg &= ~host->irq_mask;

	/* Flags the worker handles itself are left pending in IREG. */
	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	/* Ack everything else that is not being forwarded below. */
	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		/* Winning this race means the timeout timer did not fire. */
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			/* Mask and ack what we consumed, then defer to the
			 * threaded handler. */
			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}
    883
/*
 * Pick a clock divider so the bus clock does not exceed @rate.
 *
 * The input clock is set to mmc->f_max and divided by 2^div with div in
 * 0..7. For resulting rates above 25 MHz, parts that have the LPM
 * register get their low-power/delay bits programmed as well.
 *
 * Returns the rate actually achieved.
 */
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);

	if (real_rate > 25000000) {
		/* Per-generation LPM programming; JZ4740 has no LPM register. */
		if (host->version >= JZ_MMC_JZ4780) {
			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
				   JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4760) {
			writel(JZ_MMC_LPM_DRV_RISING |
				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4725B)
			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
				   host->base + JZ_REG_MMC_LPM);
	}

	return real_rate;
}
    918
/*
 * mmc_host_ops .request: start a new request.
 *
 * Arms the software timeout, enables the end-of-command interrupt and
 * issues the command; the request is completed from the threaded IRQ
 * handler or, if no interrupt arrives, from jz4740_mmc_timeout().
 */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	/* Clear any stale interrupt flags before starting. */
	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}
    934
    935static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
    936{
    937	struct jz4740_mmc_host *host = mmc_priv(mmc);
    938	if (ios->clock)
    939		jz4740_mmc_set_clock_rate(host, ios->clock);
    940
    941	switch (ios->power_mode) {
    942	case MMC_POWER_UP:
    943		jz4740_mmc_reset(host);
    944		if (!IS_ERR(mmc->supply.vmmc))
    945			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
    946		host->cmdat |= JZ_MMC_CMDAT_INIT;
    947		clk_prepare_enable(host->clk);
    948		break;
    949	case MMC_POWER_ON:
    950		break;
    951	default:
    952		if (!IS_ERR(mmc->supply.vmmc))
    953			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
    954		clk_disable_unprepare(host->clk);
    955		break;
    956	}
    957
    958	switch (ios->bus_width) {
    959	case MMC_BUS_WIDTH_1:
    960		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
    961		break;
    962	case MMC_BUS_WIDTH_4:
    963		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
    964		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
    965		break;
    966	case MMC_BUS_WIDTH_8:
    967		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
    968		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
    969		break;
    970	default:
    971		break;
    972	}
    973}
    974
    975static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
    976{
    977	struct jz4740_mmc_host *host = mmc_priv(mmc);
    978	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
    979}
    980
/* Host controller operations exposed to the MMC core. */
static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};
    990
    991static const struct of_device_id jz4740_mmc_of_match[] = {
    992	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
    993	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
    994	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
    995	{ .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
    996	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
    997	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
    998	{},
    999};
   1000MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
   1001
/*
 * Probe: allocate the mmc_host, resolve the controller generation from the
 * OF match data, map resources, request the IRQ, optionally grab DMA
 * channels, and register with the MMC core. Cleanup on failure unwinds in
 * reverse order via the goto labels at the bottom.
 */
static int jz4740_mmc_probe(struct platform_device* pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		host->version = (enum jz4740_mmc_version)match->data;
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
	}

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
		goto err_free_host;
	}

	mmc_regulator_get_supply(mmc);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	/* Honour an f_max from DT if mmc_of_parse() set one. */
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;
	mmc->f_min = mmc->f_max / 128;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * We use a fixed timeout of 5s, hence inform the core about it. A
	 * future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;

	/* Limits dictated by the BLKLEN/NOB register widths. */
	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	/* All interrupts masked until explicitly enabled. */
	host->irq_mask = ~0;

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
			dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	/* DMA is optional; fall back to PIO on any error except deferral. */
	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}
   1120
/*
 * Remove: quiesce the controller (timer, interrupts, reset), unregister
 * from the MMC core, then release the IRQ, DMA channels and host memory —
 * the reverse of probe.
 */
static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	/* Mask every controller interrupt source before tearing down. */
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}
   1140
/* System suspend: move the MMC pins into their pinctrl sleep state. */
static int jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}
   1145
/* System resume: restore the default pinctrl state for the MMC pins. */
static int jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_select_default_state(dev);
}
   1150
/* Sleep-only PM ops (no runtime PM). */
static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
				jz4740_mmc_resume);
   1153
static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		/* Probe may wait on DMA channels; allow async probing. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
	},
};
   1164
/* Generate module init/exit that register/unregister the platform driver. */
module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");