cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-mem.c (24717B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
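
/*
 * Usage sketch (illustrative, not part of the original file): a controller
 * driver's exec_op() hook might bracket its DMA transfer with the two
 * helpers above. foo_run_dma() stands in for a hypothetical
 * controller-specific routine; everything else is the API defined here.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_run_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */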

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	bool op_is_dtr =
		op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;

	if (op_is_dtr) {
		if (!spi_mem_controller_is_capable(ctlr, dtr))
			return false;

		if (op->cmd.nbytes != 2)
			return false;
	} else {
		if (op->cmd.nbytes != 1)
			return false;
	}

	if (op->data.ecc) {
		if (!spi_mem_controller_is_capable(ctlr, ecc))
			return false;
	}

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using them because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
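
/*
 * Usage sketch (illustrative, not part of the original file): probing
 * whether a 1-1-4 quad-output read (opcode 0x6b, purely as an example)
 * is usable before selecting it, built with the SPI_MEM_OP() helpers
 * from <linux/spi/spi-mem.h>.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(0, NULL, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		... fall back to a slower single-wire read ...
 */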

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR and DUMMY cycles with
	 * kmalloc() so we're guaranteed that this buffer is DMA-able, as
	 * required by the SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
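
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * 3-byte JEDEC ID (opcode 0x9f) over a single wire. The destination buffer
 * is kmalloc'ed because spi_mem_check_op() rejects on-stack buffers.
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(mem, &op);
 */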

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
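
/*
 * Usage sketch (illustrative, not part of the original file): splitting a
 * large read into controller-sized chunks, the same pattern
 * spi_mem_no_dirmap_read() below relies on. buf/len are a caller-provided
 * DMA-able buffer and its length.
 *
 *	while (len) {
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		op.addr.val += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */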

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
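
/*
 * Usage sketch (illustrative, not part of the original file): creating a
 * read direct mapping over a hypothetical 16 MiB flash with a fast-read
 * (0x0b) template. The fields are those of struct spi_mem_dirmap_info.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */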

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
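
/*
 * Usage sketch (illustrative, not part of the original file): because
 * spi_mem_dirmap_read() may return less than @len, callers typically loop
 * until the whole range has been read (spi_mem_dirmap_write() below follows
 * the same pattern).
 *
 *	while (len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -EIO;
 *
 *		offs += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 */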

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_read_status(struct spi_mem *mem,
			       const struct spi_mem_op *op,
			       u16 *status)
{
	const u8 *bytes = (u8 *)op->data.buf.in;
	int ret;

	ret = spi_mem_exec_op(mem, op);
	if (ret)
		return ret;

	if (op->data.nbytes > 1)
		*status = ((u16)bytes[0] << 8) | bytes[1];
	else
		*status = bytes[0];

	return 0;
}

/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 *         -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
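
/*
 * Usage sketch (illustrative, not part of the original file): waiting for
 * a NOR-style WIP bit (bit 0 of the 0x05 Read Status Register) to clear,
 * with no initial delay, a 100us polling interval and a 400ms timeout.
 * status_buf must be a DMA-able one-byte buffer.
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(1, status_buf, 1));
 *
 *	ret = spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 100, 400);
 */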

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static void spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		memdrv->remove(mem);
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
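
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * SPI mem driver registration. module_spi_mem_driver() from
 * <linux/spi/spi-mem.h> wires this function up with THIS_MODULE;
 * foo_probe()/foo_remove() are hypothetical.
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-flash",
 *			},
 *		},
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */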

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);