cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cesa.h (25449B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef __MARVELL_CESA_H__
      3#define __MARVELL_CESA_H__
      4
      5#include <crypto/internal/hash.h>
      6#include <crypto/internal/skcipher.h>
      7
      8#include <linux/dma-direction.h>
      9#include <linux/dmapool.h>
     10
     11#define CESA_ENGINE_OFF(i)			(((i) * 0x2000))
     12
     13#define CESA_TDMA_BYTE_CNT			0x800
     14#define CESA_TDMA_SRC_ADDR			0x810
     15#define CESA_TDMA_DST_ADDR			0x820
     16#define CESA_TDMA_NEXT_ADDR			0x830
     17
     18#define CESA_TDMA_CONTROL			0x840
     19#define CESA_TDMA_DST_BURST			GENMASK(2, 0)
     20#define CESA_TDMA_DST_BURST_32B			3
     21#define CESA_TDMA_DST_BURST_128B		4
     22#define CESA_TDMA_OUT_RD_EN			BIT(4)
     23#define CESA_TDMA_SRC_BURST			GENMASK(8, 6)
     24#define CESA_TDMA_SRC_BURST_32B			(3 << 6)
     25#define CESA_TDMA_SRC_BURST_128B		(4 << 6)
     26#define CESA_TDMA_CHAIN				BIT(9)
     27#define CESA_TDMA_BYTE_SWAP			BIT(11)
     28#define CESA_TDMA_NO_BYTE_SWAP			BIT(11)
     29#define CESA_TDMA_EN				BIT(12)
     30#define CESA_TDMA_FETCH_ND			BIT(13)
     31#define CESA_TDMA_ACT				BIT(14)
     32
     33#define CESA_TDMA_CUR				0x870
     34#define CESA_TDMA_ERROR_CAUSE			0x8c8
     35#define CESA_TDMA_ERROR_MSK			0x8cc
     36
     37#define CESA_TDMA_WINDOW_BASE(x)		(((x) * 0x8) + 0xa00)
     38#define CESA_TDMA_WINDOW_CTRL(x)		(((x) * 0x8) + 0xa04)
     39
     40#define CESA_IVDIG(x)				(0xdd00 + ((x) * 4) +	\
     41						 (((x) < 5) ? 0 : 0x14))
     42
     43#define CESA_SA_CMD				0xde00
     44#define CESA_SA_CMD_EN_CESA_SA_ACCL0		BIT(0)
     45#define CESA_SA_CMD_EN_CESA_SA_ACCL1		BIT(1)
     46#define CESA_SA_CMD_DISABLE_SEC			BIT(2)
     47
     48#define CESA_SA_DESC_P0				0xde04
     49
     50#define CESA_SA_DESC_P1				0xde14
     51
     52#define CESA_SA_CFG				0xde08
     53#define CESA_SA_CFG_STOP_DIG_ERR		GENMASK(1, 0)
     54#define CESA_SA_CFG_DIG_ERR_CONT		0
     55#define CESA_SA_CFG_DIG_ERR_SKIP		1
     56#define CESA_SA_CFG_DIG_ERR_STOP		3
     57#define CESA_SA_CFG_CH0_W_IDMA			BIT(7)
     58#define CESA_SA_CFG_CH1_W_IDMA			BIT(8)
     59#define CESA_SA_CFG_ACT_CH0_IDMA		BIT(9)
     60#define CESA_SA_CFG_ACT_CH1_IDMA		BIT(10)
     61#define CESA_SA_CFG_MULTI_PKT			BIT(11)
     62#define CESA_SA_CFG_PARA_DIS			BIT(13)
     63
     64#define CESA_SA_ACCEL_STATUS			0xde0c
     65#define CESA_SA_ST_ACT_0			BIT(0)
     66#define CESA_SA_ST_ACT_1			BIT(1)
     67
     68/*
     69 * CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only
      70 * in Errata 4.12. It looks like it was part of an IRQ controller in the FPGA
      71 * and someone forgot to remove it while switching to the core and moving to
     72 * CESA_SA_INT_STATUS.
     73 */
     74#define CESA_SA_FPGA_INT_STATUS			0xdd68
     75#define CESA_SA_INT_STATUS			0xde20
     76#define CESA_SA_INT_AUTH_DONE			BIT(0)
     77#define CESA_SA_INT_DES_E_DONE			BIT(1)
     78#define CESA_SA_INT_AES_E_DONE			BIT(2)
     79#define CESA_SA_INT_AES_D_DONE			BIT(3)
     80#define CESA_SA_INT_ENC_DONE			BIT(4)
     81#define CESA_SA_INT_ACCEL0_DONE			BIT(5)
     82#define CESA_SA_INT_ACCEL1_DONE			BIT(6)
     83#define CESA_SA_INT_ACC0_IDMA_DONE		BIT(7)
     84#define CESA_SA_INT_ACC1_IDMA_DONE		BIT(8)
     85#define CESA_SA_INT_IDMA_DONE			BIT(9)
     86#define CESA_SA_INT_IDMA_OWN_ERR		BIT(10)
     87
     88#define CESA_SA_INT_MSK				0xde24
     89
     90#define CESA_SA_DESC_CFG_OP_MAC_ONLY		0
     91#define CESA_SA_DESC_CFG_OP_CRYPT_ONLY		1
     92#define CESA_SA_DESC_CFG_OP_MAC_CRYPT		2
     93#define CESA_SA_DESC_CFG_OP_CRYPT_MAC		3
     94#define CESA_SA_DESC_CFG_OP_MSK			GENMASK(1, 0)
     95#define CESA_SA_DESC_CFG_MACM_SHA256		(1 << 4)
     96#define CESA_SA_DESC_CFG_MACM_HMAC_SHA256	(3 << 4)
     97#define CESA_SA_DESC_CFG_MACM_MD5		(4 << 4)
     98#define CESA_SA_DESC_CFG_MACM_SHA1		(5 << 4)
     99#define CESA_SA_DESC_CFG_MACM_HMAC_MD5		(6 << 4)
    100#define CESA_SA_DESC_CFG_MACM_HMAC_SHA1		(7 << 4)
    101#define CESA_SA_DESC_CFG_MACM_MSK		GENMASK(6, 4)
    102#define CESA_SA_DESC_CFG_CRYPTM_DES		(1 << 8)
    103#define CESA_SA_DESC_CFG_CRYPTM_3DES		(2 << 8)
    104#define CESA_SA_DESC_CFG_CRYPTM_AES		(3 << 8)
    105#define CESA_SA_DESC_CFG_CRYPTM_MSK		GENMASK(9, 8)
    106#define CESA_SA_DESC_CFG_DIR_ENC		(0 << 12)
    107#define CESA_SA_DESC_CFG_DIR_DEC		(1 << 12)
    108#define CESA_SA_DESC_CFG_CRYPTCM_ECB		(0 << 16)
    109#define CESA_SA_DESC_CFG_CRYPTCM_CBC		(1 << 16)
    110#define CESA_SA_DESC_CFG_CRYPTCM_MSK		BIT(16)
    111#define CESA_SA_DESC_CFG_3DES_EEE		(0 << 20)
    112#define CESA_SA_DESC_CFG_3DES_EDE		(1 << 20)
    113#define CESA_SA_DESC_CFG_AES_LEN_128		(0 << 24)
    114#define CESA_SA_DESC_CFG_AES_LEN_192		(1 << 24)
    115#define CESA_SA_DESC_CFG_AES_LEN_256		(2 << 24)
    116#define CESA_SA_DESC_CFG_AES_LEN_MSK		GENMASK(25, 24)
    117#define CESA_SA_DESC_CFG_NOT_FRAG		(0 << 30)
    118#define CESA_SA_DESC_CFG_FIRST_FRAG		(1 << 30)
    119#define CESA_SA_DESC_CFG_LAST_FRAG		(2 << 30)
    120#define CESA_SA_DESC_CFG_MID_FRAG		(3 << 30)
    121#define CESA_SA_DESC_CFG_FRAG_MSK		GENMASK(31, 30)
    122
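/*
 * Illustrative only, not part of the original header: the CESA_SA_DESC_CFG_*
 * fields above OR together into the descriptor config word. A single-fragment
 * AES-128-CBC encryption, for instance, would be described by:
 */
#define CESA_EXAMPLE_AES128_CBC_ENC_CFG					\
	(CESA_SA_DESC_CFG_OP_CRYPT_ONLY |				\
	 CESA_SA_DESC_CFG_CRYPTM_AES |					\
	 CESA_SA_DESC_CFG_CRYPTCM_CBC |					\
	 CESA_SA_DESC_CFG_DIR_ENC |					\
	 CESA_SA_DESC_CFG_AES_LEN_128 |					\
	 CESA_SA_DESC_CFG_NOT_FRAG)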
     123/* Crypto memory map:
    124 * /-----------\ 0
    125 * | ACCEL CFG |	4 * 8
    126 * |-----------| 0x20
    127 * | CRYPT KEY |	8 * 4
    128 * |-----------| 0x40
    129 * |  IV   IN  |	4 * 4
    130 * |-----------| 0x40 (inplace)
    131 * |  IV BUF   |	4 * 4
    132 * |-----------| 0x80
    133 * |  DATA IN  |	16 * x (max ->max_req_size)
    134 * |-----------| 0x80 (inplace operation)
    135 * |  DATA OUT |	16 * x (max ->max_req_size)
    136 * \-----------/ SRAM size
    137 */
    138
    139/*
    140 * Hashing memory map:
    141 * /-----------\ 0
    142 * | ACCEL CFG |        4 * 8
    143 * |-----------| 0x20
    144 * | Inner IV  |        8 * 4
    145 * |-----------| 0x40
    146 * | Outer IV  |        8 * 4
    147 * |-----------| 0x60
    148 * | Output BUF|        8 * 4
    149 * |-----------| 0x80
    150 * |  DATA IN  |        64 * x (max ->max_req_size)
    151 * \-----------/ SRAM size
    152 */
    153
    154#define CESA_SA_CFG_SRAM_OFFSET			0x00
    155#define CESA_SA_DATA_SRAM_OFFSET		0x80
    156
    157#define CESA_SA_CRYPT_KEY_SRAM_OFFSET		0x20
    158#define CESA_SA_CRYPT_IV_SRAM_OFFSET		0x40
    159
    160#define CESA_SA_MAC_IIV_SRAM_OFFSET		0x20
    161#define CESA_SA_MAC_OIV_SRAM_OFFSET		0x40
    162#define CESA_SA_MAC_DIG_SRAM_OFFSET		0x60
    163
    164#define CESA_SA_DESC_CRYPT_DATA(offset)					\
    165	cpu_to_le32((CESA_SA_DATA_SRAM_OFFSET + (offset)) |		\
    166		    ((CESA_SA_DATA_SRAM_OFFSET + (offset)) << 16))
    167
    168#define CESA_SA_DESC_CRYPT_IV(offset)					\
    169	cpu_to_le32((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) |	\
    170		    ((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) << 16))
    171
    172#define CESA_SA_DESC_CRYPT_KEY(offset)					\
    173	cpu_to_le32(CESA_SA_CRYPT_KEY_SRAM_OFFSET + (offset))
    174
    175#define CESA_SA_DESC_MAC_DATA(offset)					\
    176	cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset))
    177#define CESA_SA_DESC_MAC_DATA_MSK		cpu_to_le32(GENMASK(15, 0))
    178
    179#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len)	cpu_to_le32((total_len) << 16)
    180#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK		cpu_to_le32(GENMASK(31, 16))
    181
    182#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX	0xffff
    183
    184#define CESA_SA_DESC_MAC_DIGEST(offset)					\
    185	cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset))
    186#define CESA_SA_DESC_MAC_DIGEST_MSK		cpu_to_le32(GENMASK(15, 0))
    187
    188#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len)	cpu_to_le32((frag_len) << 16)
    189#define CESA_SA_DESC_MAC_FRAG_LEN_MSK		cpu_to_le32(GENMASK(31, 16))
    190
    191#define CESA_SA_DESC_MAC_IV(offset)					\
    192	cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) |		\
    193		    ((CESA_SA_MAC_OIV_SRAM_OFFSET + (offset)) << 16))
    194
    195#define CESA_SA_SRAM_SIZE			2048
    196#define CESA_SA_SRAM_PAYLOAD_SIZE		(cesa_dev->sram_size - \
    197						 CESA_SA_DATA_SRAM_OFFSET)
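/*
 * Worked example (assuming the default 2048-byte SRAM): the payload area is
 * whatever is left after the 0x80-byte fixed region, i.e.
 * CESA_SA_SRAM_PAYLOAD_SIZE = 2048 - 0x80 = 1920 bytes per pass.
 */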
    198
    199#define CESA_SA_DEFAULT_SRAM_SIZE		2048
    200#define CESA_SA_MIN_SRAM_SIZE			1024
    201
    202#define CESA_SA_SRAM_MSK			(2048 - 1)
    203
    204#define CESA_MAX_HASH_BLOCK_SIZE		64
    205#define CESA_HASH_BLOCK_SIZE_MSK		(CESA_MAX_HASH_BLOCK_SIZE - 1)
    206
    207/**
    208 * struct mv_cesa_sec_accel_desc - security accelerator descriptor
    209 * @config:	engine config
    210 * @enc_p:	input and output data pointers for a cipher operation
    211 * @enc_len:	cipher operation length
    212 * @enc_key_p:	cipher key pointer
    213 * @enc_iv:	cipher IV pointers
    214 * @mac_src_p:	input pointer and total hash length
    215 * @mac_digest:	digest pointer and hash operation length
    216 * @mac_iv:	hmac IV pointers
    217 *
    218 * Structure passed to the CESA engine to describe the crypto operation
    219 * to be executed.
    220 */
    221struct mv_cesa_sec_accel_desc {
    222	__le32 config;
    223	__le32 enc_p;
    224	__le32 enc_len;
    225	__le32 enc_key_p;
    226	__le32 enc_iv;
    227	__le32 mac_src_p;
    228	__le32 mac_digest;
    229	__le32 mac_iv;
    230};
    231
    232/**
    233 * struct mv_cesa_skcipher_op_ctx - cipher operation context
    234 * @key:	cipher key
    235 * @iv:		cipher IV
    236 *
    237 * Context associated to a cipher operation.
    238 */
    239struct mv_cesa_skcipher_op_ctx {
    240	__le32 key[8];
    241	u32 iv[4];
    242};
    243
    244/**
    245 * struct mv_cesa_hash_op_ctx - hash or hmac operation context
     246 * @iv:		inner and outer IV
     247 * @hash:	hash output buffer
     248 *
     249 * Context associated to a hash or hmac operation.
    250 */
    251struct mv_cesa_hash_op_ctx {
    252	u32 iv[16];
    253	__le32 hash[8];
    254};
    255
    256/**
    257 * struct mv_cesa_op_ctx - crypto operation context
    258 * @desc:	CESA descriptor
    259 * @ctx:	context associated to the crypto operation
    260 *
    261 * Context associated to a crypto operation.
    262 */
    263struct mv_cesa_op_ctx {
    264	struct mv_cesa_sec_accel_desc desc;
    265	union {
    266		struct mv_cesa_skcipher_op_ctx skcipher;
    267		struct mv_cesa_hash_op_ctx hash;
    268	} ctx;
    269};
    270
    271/* TDMA descriptor flags */
    272#define CESA_TDMA_DST_IN_SRAM			BIT(31)
    273#define CESA_TDMA_SRC_IN_SRAM			BIT(30)
    274#define CESA_TDMA_END_OF_REQ			BIT(29)
    275#define CESA_TDMA_BREAK_CHAIN			BIT(28)
    276#define CESA_TDMA_SET_STATE			BIT(27)
    277#define CESA_TDMA_TYPE_MSK			GENMASK(26, 0)
    278#define CESA_TDMA_DUMMY				0
    279#define CESA_TDMA_DATA				1
    280#define CESA_TDMA_OP				2
    281#define CESA_TDMA_RESULT			3
    282
    283/**
    284 * struct mv_cesa_tdma_desc - TDMA descriptor
    285 * @byte_cnt:	number of bytes to transfer
    286 * @src:	DMA address of the source
    287 * @dst:	DMA address of the destination
    288 * @next_dma:	DMA address of the next TDMA descriptor
    289 * @cur_dma:	DMA address of this TDMA descriptor
    290 * @next:	pointer to the next TDMA descriptor
    291 * @op:		CESA operation attached to this TDMA descriptor
    292 * @data:	raw data attached to this TDMA descriptor
    293 * @flags:	flags describing the TDMA transfer. See the
    294 *		"TDMA descriptor flags" section above
    295 *
    296 * TDMA descriptor used to create a transfer chain describing a crypto
    297 * operation.
    298 */
    299struct mv_cesa_tdma_desc {
    300	__le32 byte_cnt;
    301	union {
    302		__le32 src;
    303		u32 src_dma;
    304	};
    305	union {
    306		__le32 dst;
    307		u32 dst_dma;
    308	};
    309	__le32 next_dma;
    310
    311	/* Software state */
    312	dma_addr_t cur_dma;
    313	struct mv_cesa_tdma_desc *next;
    314	union {
    315		struct mv_cesa_op_ctx *op;
    316		void *data;
    317	};
    318	u32 flags;
    319};
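/*
 * Purely illustrative helper (not in the original header): the low bits of
 * @flags select the descriptor type, the upper bits carry the placement and
 * chaining hints defined above.
 */
static inline bool cesa_example_tdma_desc_is_op(const struct mv_cesa_tdma_desc *tdma)
{
	return (tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP;
}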
    320
    321/**
    322 * struct mv_cesa_sg_dma_iter - scatter-gather iterator
    323 * @dir:	transfer direction
    324 * @sg:		scatter list
    325 * @offset:	current position in the scatter list
    326 * @op_offset:	current position in the crypto operation
    327 *
    328 * Iterator used to iterate over a scatterlist while creating a TDMA chain for
    329 * a crypto operation.
    330 */
    331struct mv_cesa_sg_dma_iter {
    332	enum dma_data_direction dir;
    333	struct scatterlist *sg;
    334	unsigned int offset;
    335	unsigned int op_offset;
    336};
    337
    338/**
    339 * struct mv_cesa_dma_iter - crypto operation iterator
    340 * @len:	the crypto operation length
    341 * @offset:	current position in the crypto operation
    342 * @op_len:	sub-operation length (the crypto engine can only act on 2kb
    343 *		chunks)
    344 *
    345 * Iterator used to create a TDMA chain for a given crypto operation.
    346 */
    347struct mv_cesa_dma_iter {
    348	unsigned int len;
    349	unsigned int offset;
    350	unsigned int op_len;
    351};
    352
    353/**
    354 * struct mv_cesa_tdma_chain - TDMA chain
    355 * @first:	first entry in the TDMA chain
    356 * @last:	last entry in the TDMA chain
    357 *
    358 * Stores a TDMA chain for a specific crypto operation.
    359 */
    360struct mv_cesa_tdma_chain {
    361	struct mv_cesa_tdma_desc *first;
    362	struct mv_cesa_tdma_desc *last;
    363};
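/*
 * Illustrative sketch (not in the original header): a TDMA chain is a
 * NULL-terminated singly linked list built through the ->next pointers,
 * so walking it looks like this.
 */
static inline unsigned int
cesa_example_tdma_chain_len(const struct mv_cesa_tdma_chain *chain)
{
	const struct mv_cesa_tdma_desc *tdma;
	unsigned int n = 0;

	for (tdma = chain->first; tdma; tdma = tdma->next)
		n++;

	return n;
}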
    364
    365struct mv_cesa_engine;
    366
    367/**
    368 * struct mv_cesa_caps - CESA device capabilities
     369 * @nengines:		number of engines
    370 * @has_tdma:		whether this device has a TDMA block
    371 * @cipher_algs:	supported cipher algorithms
    372 * @ncipher_algs:	number of supported cipher algorithms
    373 * @ahash_algs:		supported hash algorithms
    374 * @nahash_algs:	number of supported hash algorithms
    375 *
    376 * Structure used to describe CESA device capabilities.
    377 */
    378struct mv_cesa_caps {
    379	int nengines;
    380	bool has_tdma;
    381	struct skcipher_alg **cipher_algs;
    382	int ncipher_algs;
    383	struct ahash_alg **ahash_algs;
    384	int nahash_algs;
    385};
    386
    387/**
    388 * struct mv_cesa_dev_dma - DMA pools
    389 * @tdma_desc_pool:	TDMA desc pool
    390 * @op_pool:		crypto operation pool
    391 * @cache_pool:		data cache pool (used by hash implementation when the
    392 *			hash request is smaller than the hash block size)
    393 * @padding_pool:	padding pool (used by hash implementation when hardware
    394 *			padding cannot be used)
    395 *
    396 * Structure containing the different DMA pools used by this driver.
    397 */
    398struct mv_cesa_dev_dma {
    399	struct dma_pool *tdma_desc_pool;
    400	struct dma_pool *op_pool;
    401	struct dma_pool *cache_pool;
    402	struct dma_pool *padding_pool;
    403};
    404
    405/**
    406 * struct mv_cesa_dev - CESA device
    407 * @caps:	device capabilities
    408 * @regs:	device registers
    409 * @sram_size:	usable SRAM size
    410 * @lock:	device lock
    411 * @engines:	array of engines
    412 * @dma:	dma pools
    413 *
    414 * Structure storing CESA device information.
    415 */
    416struct mv_cesa_dev {
    417	const struct mv_cesa_caps *caps;
    418	void __iomem *regs;
    419	struct device *dev;
    420	unsigned int sram_size;
    421	spinlock_t lock;
    422	struct mv_cesa_engine *engines;
    423	struct mv_cesa_dev_dma *dma;
    424};
    425
    426/**
    427 * struct mv_cesa_engine - CESA engine
    428 * @id:			engine id
    429 * @regs:		engine registers
    430 * @sram:		SRAM memory region
    431 * @sram_pool:		SRAM memory region from pool
    432 * @sram_dma:		DMA address of the SRAM memory region
    433 * @lock:		engine lock
    434 * @req:		current crypto request
    435 * @clk:		engine clk
    436 * @zclk:		engine zclk
    437 * @max_req_len:	maximum chunk length (useful to create the TDMA chain)
    438 * @int_mask:		interrupt mask cache
    439 * @pool:		memory pool pointing to the memory region reserved in
    440 *			SRAM
    441 * @queue:		fifo of the pending crypto requests
    442 * @load:		engine load counter, useful for load balancing
    443 * @chain:		list of the current tdma descriptors being processed
    444 *			by this engine.
     445 * @complete_queue:	fifo of the requests processed by the engine
    446 *
    447 * Structure storing CESA engine information.
    448 */
    449struct mv_cesa_engine {
    450	int id;
    451	void __iomem *regs;
    452	union {
    453		void __iomem *sram;
    454		void *sram_pool;
    455	};
    456	dma_addr_t sram_dma;
    457	spinlock_t lock;
    458	struct crypto_async_request *req;
    459	struct clk *clk;
    460	struct clk *zclk;
    461	size_t max_req_len;
    462	u32 int_mask;
    463	struct gen_pool *pool;
    464	struct crypto_queue queue;
    465	atomic_t load;
    466	struct mv_cesa_tdma_chain chain;
    467	struct list_head complete_queue;
    468	int irq;
    469};
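/*
 * Rough sketch, not a copy of the driver's step functions: once the
 * descriptor and data have been staged in SRAM, a run on security
 * accelerator 0 is typically kicked by writing to CESA_SA_CMD.
 */
static inline void cesa_example_engine_kick(struct mv_cesa_engine *engine)
{
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}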
    470
    471/**
    472 * struct mv_cesa_req_ops - CESA request operations
    473 * @process:	process a request chunk result (should return 0 if the
     474 *		operation is complete, -EINPROGRESS if it needs more
     475 *		steps, or an error code)
    476 * @step:	launch the crypto operation on the next chunk
    477 * @cleanup:	cleanup the crypto request (release associated data)
     478 * @complete:	complete the request, i.e. copy the result or context from
     479 *		SRAM when needed.
    480 */
    481struct mv_cesa_req_ops {
    482	int (*process)(struct crypto_async_request *req, u32 status);
    483	void (*step)(struct crypto_async_request *req);
    484	void (*cleanup)(struct crypto_async_request *req);
    485	void (*complete)(struct crypto_async_request *req);
    486};
    487
    488/**
    489 * struct mv_cesa_ctx - CESA operation context
    490 * @ops:	crypto operations
    491 *
    492 * Base context structure inherited by operation specific ones.
    493 */
    494struct mv_cesa_ctx {
    495	const struct mv_cesa_req_ops *ops;
    496};
    497
    498/**
    499 * struct mv_cesa_hash_ctx - CESA hash operation context
    500 * @base:	base context structure
    501 *
    502 * Hash context structure.
    503 */
    504struct mv_cesa_hash_ctx {
    505	struct mv_cesa_ctx base;
    506};
    507
    508/**
     509 * struct mv_cesa_hmac_ctx - CESA hmac operation context
    510 * @base:	base context structure
    511 * @iv:		initialization vectors
    512 *
    513 * HMAC context structure.
    514 */
    515struct mv_cesa_hmac_ctx {
    516	struct mv_cesa_ctx base;
    517	__be32 iv[16];
    518};
    519
    520/**
    521 * enum mv_cesa_req_type - request type definitions
    522 * @CESA_STD_REQ:	standard request
    523 * @CESA_DMA_REQ:	DMA request
    524 */
    525enum mv_cesa_req_type {
    526	CESA_STD_REQ,
    527	CESA_DMA_REQ,
    528};
    529
    530/**
    531 * struct mv_cesa_req - CESA request
    532 * @engine:	engine associated with this request
     533 * @chain:	list of tdma descriptors associated with this request
    534 */
    535struct mv_cesa_req {
    536	struct mv_cesa_engine *engine;
    537	struct mv_cesa_tdma_chain chain;
    538};
    539
    540/**
    541 * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard
    542 *				requests
    543 * @iter:	sg mapping iterator
    544 * @offset:	current offset in the SG entry mapped in memory
    545 */
    546struct mv_cesa_sg_std_iter {
    547	struct sg_mapping_iter iter;
    548	unsigned int offset;
    549};
    550
    551/**
    552 * struct mv_cesa_skcipher_std_req - cipher standard request
    553 * @op:		operation context
    554 * @offset:	current operation offset
    555 * @size:	size of the crypto operation
    556 */
    557struct mv_cesa_skcipher_std_req {
    558	struct mv_cesa_op_ctx op;
    559	unsigned int offset;
    560	unsigned int size;
    561	bool skip_ctx;
    562};
    563
    564/**
    565 * struct mv_cesa_skcipher_req - cipher request
     566 * @std:	standard request information
    567 * @src_nents:	number of entries in the src sg list
    568 * @dst_nents:	number of entries in the dest sg list
    569 */
    570struct mv_cesa_skcipher_req {
    571	struct mv_cesa_req base;
    572	struct mv_cesa_skcipher_std_req std;
    573	int src_nents;
    574	int dst_nents;
    575};
    576
    577/**
    578 * struct mv_cesa_ahash_std_req - standard hash request
    579 * @offset:	current operation offset
    580 */
    581struct mv_cesa_ahash_std_req {
    582	unsigned int offset;
    583};
    584
    585/**
    586 * struct mv_cesa_ahash_dma_req - DMA hash request
    587 * @padding:		padding buffer
    588 * @padding_dma:	DMA address of the padding buffer
    589 * @cache_dma:		DMA address of the cache buffer
    590 */
    591struct mv_cesa_ahash_dma_req {
    592	u8 *padding;
    593	dma_addr_t padding_dma;
    594	u8 *cache;
    595	dma_addr_t cache_dma;
    596};
    597
    598/**
    599 * struct mv_cesa_ahash_req - hash request
    600 * @req:		type specific request information
    601 * @cache:		cache buffer
    602 * @cache_ptr:		write pointer in the cache buffer
    603 * @len:		hash total length
    604 * @src_nents:		number of entries in the scatterlist
    605 * @last_req:		define whether the current operation is the last one
    606 *			or not
    607 * @state:		hash state
    608 */
    609struct mv_cesa_ahash_req {
    610	struct mv_cesa_req base;
    611	union {
    612		struct mv_cesa_ahash_dma_req dma;
    613		struct mv_cesa_ahash_std_req std;
    614	} req;
    615	struct mv_cesa_op_ctx op_tmpl;
    616	u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
    617	unsigned int cache_ptr;
    618	u64 len;
    619	int src_nents;
    620	bool last_req;
    621	bool algo_le;
    622	u32 state[8];
    623};
    624
    625/* CESA functions */
    626
    627extern struct mv_cesa_dev *cesa_dev;
    628
    629
    630static inline void
    631mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
    632					struct crypto_async_request *req)
    633{
    634	list_add_tail(&req->list, &engine->complete_queue);
    635}
    636
    637static inline struct crypto_async_request *
    638mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
    639{
    640	struct crypto_async_request *req;
    641
    642	req = list_first_entry_or_null(&engine->complete_queue,
    643				       struct crypto_async_request,
    644				       list);
    645	if (req)
    646		list_del(&req->list);
    647
    648	return req;
    649}
    650
    651
    652static inline enum mv_cesa_req_type
    653mv_cesa_req_get_type(struct mv_cesa_req *req)
    654{
    655	return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
    656}
    657
    658static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
    659					 u32 cfg, u32 mask)
    660{
    661	op->desc.config &= cpu_to_le32(~mask);
    662	op->desc.config |= cpu_to_le32(cfg);
    663}
    664
    665static inline u32 mv_cesa_get_op_cfg(const struct mv_cesa_op_ctx *op)
    666{
    667	return le32_to_cpu(op->desc.config);
    668}
    669
    670static inline void mv_cesa_set_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg)
    671{
    672	op->desc.config = cpu_to_le32(cfg);
    673}
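/*
 * Usage sketch (illustrative): the fragment field is typically seeded with
 * mv_cesa_set_op_cfg() and then narrowed with mv_cesa_update_op_cfg() as
 * further chunks of the request are processed; the mask argument limits the
 * update to the FRAG bits and leaves the rest of the config word untouched.
 */
static inline void cesa_example_mark_mid_frag(struct mv_cesa_op_ctx *op)
{
	mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
			      CESA_SA_DESC_CFG_FRAG_MSK);
}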
    674
    675static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine,
    676				     struct mv_cesa_op_ctx *op)
    677{
    678	u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK;
    679
    680	op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset);
    681	op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset);
    682	op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset);
    683	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK;
    684	op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset);
    685	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK;
    686	op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset);
    687	op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset);
    688}
    689
    690static inline void mv_cesa_set_crypt_op_len(struct mv_cesa_op_ctx *op, int len)
    691{
    692	op->desc.enc_len = cpu_to_le32(len);
    693}
    694
    695static inline void mv_cesa_set_mac_op_total_len(struct mv_cesa_op_ctx *op,
    696						int len)
    697{
    698	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK;
    699	op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len);
    700}
    701
    702static inline void mv_cesa_set_mac_op_frag_len(struct mv_cesa_op_ctx *op,
    703					       int len)
    704{
    705	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK;
    706	op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len);
    707}
    708
    709static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine,
    710					u32 int_mask)
    711{
    712	if (int_mask == engine->int_mask)
    713		return;
    714
    715	writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK);
    716	engine->int_mask = int_mask;
    717}
    718
    719static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
    720{
    721	return engine->int_mask;
    722}
    723
    724static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
    725{
    726	return (mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) ==
    727		CESA_SA_DESC_CFG_FIRST_FRAG;
    728}
    729
    730int mv_cesa_queue_req(struct crypto_async_request *req,
    731		      struct mv_cesa_req *creq);
    732
    733struct crypto_async_request *
    734mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
    735			   struct crypto_async_request **backlog);
    736
    737static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
    738{
    739	int i;
    740	u32 min_load = U32_MAX;
    741	struct mv_cesa_engine *selected = NULL;
    742
    743	for (i = 0; i < cesa_dev->caps->nengines; i++) {
    744		struct mv_cesa_engine *engine = cesa_dev->engines + i;
    745		u32 load = atomic_read(&engine->load);
    746
    747		if (load < min_load) {
    748			min_load = load;
    749			selected = engine;
    750		}
    751	}
    752
    753	atomic_add(weight, &selected->load);
    754
    755	return selected;
    756}
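/*
 * Typical usage (sketch; the exact weight is up to the caller, the driver
 * uses the request length and drops it again with atomic_sub() once the
 * request completes):
 *
 *	engine = mv_cesa_select_engine(req->cryptlen);
 *	...
 *	atomic_sub(req->cryptlen, &engine->load);	(on completion)
 */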
    757
    758/*
    759 * Helper function that indicates whether a crypto request needs to be
    760 * cleaned up or not after being enqueued using mv_cesa_queue_req().
    761 */
    762static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
    763					    int ret)
    764{
    765	/*
    766	 * The queue still had some space, the request was queued
    767	 * normally, so there's no need to clean it up.
    768	 */
    769	if (ret == -EINPROGRESS)
    770		return false;
    771
    772	/*
     773	 * The queue had no space left, but since the request is
    774	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
    775	 * the backlog and will be processed later. There's no need to
    776	 * clean it up.
    777	 */
    778	if (ret == -EBUSY)
    779		return false;
    780
    781	/* Request wasn't queued, we need to clean it up */
    782	return true;
    783}
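/*
 * Typical enqueue pattern (sketch; "type_specific_cleanup" stands for
 * whatever DMA/context cleanup the caller implements):
 *
 *	ret = mv_cesa_queue_req(&req->base, &creq->base);
 *	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 *		type_specific_cleanup(req);
 */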
    784
    785/* TDMA functions */
    786
    787static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
    788					     unsigned int len)
    789{
    790	iter->len = len;
    791	iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
    792	iter->offset = 0;
    793}
    794
    795static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter,
    796					    struct scatterlist *sg,
    797					    enum dma_data_direction dir)
    798{
    799	iter->op_offset = 0;
    800	iter->offset = 0;
    801	iter->sg = sg;
    802	iter->dir = dir;
    803}
    804
    805static inline unsigned int
    806mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter,
    807				  struct mv_cesa_sg_dma_iter *sgiter)
    808{
    809	return min(iter->op_len - sgiter->op_offset,
    810		   sg_dma_len(sgiter->sg) - sgiter->offset);
    811}
    812
    813bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain,
    814					struct mv_cesa_sg_dma_iter *sgiter,
    815					unsigned int len);
    816
    817static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
    818{
    819	iter->offset += iter->op_len;
    820	iter->op_len = min(iter->len - iter->offset,
    821			   CESA_SA_SRAM_PAYLOAD_SIZE);
    822
    823	return iter->op_len;
    824}
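/*
 * Illustrative walk of the two iterators above (assumption: a real caller
 * would queue a TDMA transfer where the inner comment sits, as the driver's
 * op-transfer helper does): the outer loop advances one SRAM-sized
 * sub-operation at a time, the inner loop covers it scatterlist entry by
 * scatterlist entry.
 */
static inline void
cesa_example_walk_transfers(struct scatterlist *sgl, unsigned int req_len)
{
	struct mv_cesa_dma_iter iter;
	struct mv_cesa_sg_dma_iter sgiter;

	mv_cesa_req_dma_iter_init(&iter, req_len);
	mv_cesa_sg_dma_iter_init(&sgiter, sgl, DMA_TO_DEVICE);
	do {
		unsigned int len;

		do {
			len = mv_cesa_req_dma_iter_transfer_len(&iter, &sgiter);
			/* queue a transfer of 'len' bytes here */
		} while (mv_cesa_req_dma_iter_next_transfer(&iter, &sgiter, len));
	} while (mv_cesa_req_dma_iter_next_op(&iter));
}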
    825
    826void mv_cesa_dma_step(struct mv_cesa_req *dreq);
    827
    828static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
    829				      u32 status)
    830{
    831	if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
    832		return -EINPROGRESS;
    833
    834	if (status & CESA_SA_INT_IDMA_OWN_ERR)
    835		return -EINVAL;
    836
    837	return 0;
    838}
    839
    840void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
    841			 struct mv_cesa_engine *engine);
    842void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
    843void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
    844			struct mv_cesa_req *dreq);
    845int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
    846
    847
    848static inline void
    849mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
    850{
    851	memset(chain, 0, sizeof(*chain));
    852}
    853
    854int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
    855			  u32 size, u32 flags, gfp_t gfp_flags);
    856
    857struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
    858					const struct mv_cesa_op_ctx *op_templ,
    859					bool skip_ctx,
    860					gfp_t flags);
    861
    862int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
    863				  dma_addr_t dst, dma_addr_t src, u32 size,
    864				  u32 flags, gfp_t gfp_flags);
    865
    866int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags);
    867int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags);
    868
    869int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
    870				 struct mv_cesa_dma_iter *dma_iter,
    871				 struct mv_cesa_sg_dma_iter *sgiter,
    872				 gfp_t gfp_flags);
    873
    874size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
    875		       struct scatterlist *sgl, unsigned int nents,
    876		       unsigned int sram_off, size_t buflen, off_t skip,
    877		       bool to_sram);
    878
    879static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
    880					     struct scatterlist *sgl,
    881					     unsigned int nents,
    882					     unsigned int sram_off,
    883					     size_t buflen, off_t skip)
    884{
    885	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
    886			       true);
    887}
    888
    889static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
    890					       struct scatterlist *sgl,
    891					       unsigned int nents,
    892					       unsigned int sram_off,
    893					       size_t buflen, off_t skip)
    894{
    895	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
    896			       false);
    897}
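/*
 * On the standard (non-TDMA) path the request data is staged through the
 * engine SRAM with the helpers above, roughly like this (sketch, field
 * names follow the request structures earlier in this header):
 *
 *	mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
 *				CESA_SA_DATA_SRAM_OFFSET, len, sreq->offset);
 */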
    898
    899/* Algorithm definitions */
    900
    901extern struct ahash_alg mv_md5_alg;
    902extern struct ahash_alg mv_sha1_alg;
    903extern struct ahash_alg mv_sha256_alg;
    904extern struct ahash_alg mv_ahmac_md5_alg;
    905extern struct ahash_alg mv_ahmac_sha1_alg;
    906extern struct ahash_alg mv_ahmac_sha256_alg;
    907
    908extern struct skcipher_alg mv_cesa_ecb_des_alg;
    909extern struct skcipher_alg mv_cesa_cbc_des_alg;
    910extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
    911extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
    912extern struct skcipher_alg mv_cesa_ecb_aes_alg;
    913extern struct skcipher_alg mv_cesa_cbc_aes_alg;
    914
    915#endif /* __MARVELL_CESA_H__ */