cipher.h (13337B)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2016 Broadcom
 */

#ifndef _CIPHER_H
#define _CIPHER_H

#include <linux/atomic.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>

#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* Driver supports up to MAX_SPUS SPU blocks */
#define MAX_SPUS 16

#define ARC4_STATE_SIZE 4

#define CCM_AES_IV_SIZE 16
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16

#define MAX_KEY_SIZE	ARC4_MAX_KEY_SIZE
#define MAX_IV_SIZE	AES_BLOCK_SIZE
#define MAX_DIGEST_SIZE	SHA3_512_DIGEST_SIZE
#define MAX_ASSOC_SIZE	512

/* size of salt value for AES-GCM-ESP and AES-CCM-ESP */
#define GCM_ESP_SALT_SIZE	4
#define CCM_ESP_SALT_SIZE	3
#define MAX_SALT_SIZE		GCM_ESP_SALT_SIZE
#define GCM_ESP_SALT_OFFSET	0
#define CCM_ESP_SALT_OFFSET	1

#define GCM_ESP_DIGESTSIZE	16

#define MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE

/*
 * Maximum number of bytes from a non-final hash request that can be deferred
 * until more data is available. With the new crypto API framework, this
 * can be no more than one block of data.
 */
#define HASH_CARRY_MAX	MAX_HASH_BLOCK_SIZE
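/*
 * Illustrative sketch, not part of this header: how a non-final hash
 * request might be split into whole blocks for the SPU plus a remainder
 * deferred in hash_carry until the next request, per the comment above.
 * The helper name and parameters are hypothetical; assumes block_size
 * is nonzero, carry_len is the number of bytes already carried, and
 * nbytes is the length of the new data.
 */
static inline unsigned int hash_carry_split(unsigned int carry_len,
					    unsigned int nbytes,
					    unsigned int block_size,
					    unsigned int *new_carry_len)
{
	unsigned int total = carry_len + nbytes;

	/* Bytes that cannot form a whole block are deferred */
	*new_carry_len = total % block_size;

	/* Whole-block bytes that can be submitted to hardware now */
	return total - *new_carry_len;
}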
/* Force at least 4-byte alignment of all SPU message fields */
#define SPU_MSG_ALIGN 4

/* Number of times to resend mailbox message if mb queue is full */
#define SPU_MB_RETRY_MAX 1000

/* op_counts[] indexes */
enum op_type {
	SPU_OP_CIPHER,
	SPU_OP_HASH,
	SPU_OP_HMAC,
	SPU_OP_AEAD,
	SPU_OP_NUM
};

enum spu_spu_type {
	SPU_TYPE_SPUM,
	SPU_TYPE_SPU2,
};

/*
 * SPUM_NS2 and SPUM_NSP are the SPU-M block on Northstar 2 and Northstar Plus,
 * respectively.
 */
enum spu_spu_subtype {
	SPU_SUBTYPE_SPUM_NS2,
	SPU_SUBTYPE_SPUM_NSP,
	SPU_SUBTYPE_SPU2_V1,
	SPU_SUBTYPE_SPU2_V2
};

struct spu_type_subtype {
	enum spu_spu_type type;
	enum spu_spu_subtype subtype;
};

struct cipher_op {
	enum spu_cipher_alg alg;
	enum spu_cipher_mode mode;
};

struct auth_op {
	enum hash_alg alg;
	enum hash_mode mode;
};

struct iproc_alg_s {
	u32 type;
	union {
		struct skcipher_alg skcipher;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	struct cipher_op cipher_info;
	struct auth_op auth_info;
	bool auth_first;
	bool registered;
};

/*
 * Buffers for a SPU request/reply message pair. All part of one structure to
 * allow a single alloc per request.
 */
struct spu_msg_buf {
	/* Request message fragments */

	/*
	 * SPU request message header. For SPU-M, holds MH, EMH, SCTX, BDESC,
	 * and BD header. For SPU2, holds FMD, OMD.
	 */
	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* IV or counter. Sized to include the salt. Also used for XTS tweak. */
	u8 iv_ctr[ALIGN(2 * AES_BLOCK_SIZE, SPU_MSG_ALIGN)];

	/* Hash digest. Used for both request and response. */
	u8 digest[ALIGN(MAX_DIGEST_SIZE, SPU_MSG_ALIGN)];

	/* SPU request message padding */
	u8 spu_req_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];

	/* SPU-M request message STATUS field */
	u8 tx_stat[ALIGN(SPU_TX_STATUS_LEN, SPU_MSG_ALIGN)];

	/* Response message fragments */

	/* SPU response message header */
	u8 spu_resp_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* SPU response message STATUS field padding */
	u8 rx_stat_pad[ALIGN(SPU_STAT_PAD_MAX, SPU_MSG_ALIGN)];

	/* SPU response message STATUS field */
	u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];

	union {
		/* Buffers only used for skcipher */
		struct {
			/*
			 * Field used either for SUPDT when RC4 is used, or
			 * for the tweak value when XTS/AES is used.
			 */
			u8 supdt_tweak[ALIGN(SPU_SUPDT_LEN, SPU_MSG_ALIGN)];
		} c;

		/* Buffers only used for aead */
		struct {
			/* SPU response pad for GCM data */
			u8 gcmpad[ALIGN(AES_BLOCK_SIZE, SPU_MSG_ALIGN)];

			/* SPU request msg padding for GCM AAD */
			u8 req_aad_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];

			/* SPU response data to be discarded */
			u8 resp_aad[ALIGN(MAX_ASSOC_SIZE + MAX_IV_SIZE,
					  SPU_MSG_ALIGN)];
		} a;
	};
};

struct iproc_ctx_s {
	u8 enckey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
	unsigned int enckeylen;

	u8 authkey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
	unsigned int authkeylen;

	u8 salt[MAX_SALT_SIZE];
	unsigned int salt_len;
	unsigned int salt_offset;
	u8 iv[MAX_IV_SIZE];

	unsigned int digestsize;

	struct iproc_alg_s *alg;
	bool is_esp;

	struct cipher_op cipher;
	enum spu_cipher_type cipher_type;

	struct auth_op auth;
	bool auth_first;

	/*
	 * The maximum length in bytes of the payload in a SPU message for this
	 * context. For SPU-M, the payload is the combination of AAD and data.
	 * For SPU2, the payload is just data. A value of SPU_MAX_PAYLOAD_INF
	 * indicates that there is no limit to the length of the SPU message
	 * payload.
	 */
	unsigned int max_payload;

	struct crypto_aead *fallback_cipher;

	/* auth_type is determined during processing of request */

	u8 ipad[MAX_HASH_BLOCK_SIZE];
	u8 opad[MAX_HASH_BLOCK_SIZE];

	/*
	 * Buffer to hold SPU message header template. Template is created at
	 * setkey time for skcipher requests, since most of the fields in the
	 * header are known at that time. At request time, just fill in a few
	 * missing pieces related to length of data in the request and IVs, etc.
	 */
	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* Length of SPU request header */
	u16 spu_req_hdr_len;

	/* Expected length of SPU response header */
	u16 spu_resp_hdr_len;

	/*
	 * shash descriptor - needed to perform incremental hashing in
	 * software, when hw doesn't support it.
	 */
	struct shash_desc *shash;

	bool is_rfc4543;	/* RFC 4543 style of GMAC */
};
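/*
 * Illustrative sketch, not part of this header: checking whether a
 * chunk fits within the context's maximum SPU payload, following the
 * max_payload comment above. The helper name is hypothetical; assumes
 * SPU_MAX_PAYLOAD_INF is the "no limit" sentinel from spu.h.
 */
static inline bool spu_chunk_fits(const struct iproc_ctx_s *ctx,
				  unsigned int chunk_len)
{
	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		return true;

	return chunk_len <= ctx->max_payload;
}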
/* state from iproc_reqctx_s necessary for hash state export/import */
struct spu_hash_export_s {
	unsigned int total_todo;
	unsigned int total_sent;
	u8 hash_carry[HASH_CARRY_MAX];
	unsigned int hash_carry_len;
	u8 incr_hash[MAX_DIGEST_SIZE];
	bool is_sw_hmac;
};

struct iproc_reqctx_s {
	/* general context */
	struct crypto_async_request *parent;

	/* only valid after enqueue() */
	struct iproc_ctx_s *ctx;

	u8 chan_idx;	/* Mailbox channel to be used to submit this request */

	/* total todo, rx'd, and sent for this request */
	unsigned int total_todo;
	unsigned int total_received;	/* only valid for skcipher */
	unsigned int total_sent;

	/*
	 * num bytes sent to hw from the src sg in this request. This can differ
	 * from total_sent for incremental hashing. total_sent includes previous
	 * init() and update() data. src_sent does not.
	 */
	unsigned int src_sent;

	/*
	 * For AEAD requests, start of associated data. This will typically
	 * point to the beginning of the src scatterlist from the request,
	 * since assoc data is at the beginning of the src scatterlist rather
	 * than in its own sg.
	 */
	struct scatterlist *assoc;

	/*
	 * scatterlist entry and offset to start of data for next chunk. Crypto
	 * API src scatterlist for AEAD starts with AAD, if present. For first
	 * chunk, src_sg is sg entry at beginning of input data (after AAD).
	 * src_skip begins at the offset in that sg entry where data begins.
	 */
	struct scatterlist *src_sg;
	int src_nents;	/* Number of src entries with data */
	u32 src_skip;	/* bytes of current sg entry already used */

	/*
	 * Same for destination. For AEAD, if there is AAD, output data must
	 * be written at offset following AAD.
	 */
	struct scatterlist *dst_sg;
	int dst_nents;	/* Number of dst entries with data */
	u32 dst_skip;	/* bytes of current sg entry already written */

	/* Mailbox message used to send this request to PDC driver */
	struct brcm_message mb_mssg;

	bool bd_suppress;	/* suppress BD field in SPU response? */

	/* cipher context */
	bool is_encrypt;

	/*
	 * CBC mode: IV. CTR mode: counter. Otherwise empty. Used as a DMA
	 * buffer for AEAD requests, so allocate as DMA-able memory. If the IV
	 * is concatenated with the salt, includes the salt.
	 */
	u8 *iv_ctr;
	/* Length of IV or counter, in bytes */
	unsigned int iv_ctr_len;

	/*
	 * Hash requests can be of any size, whether initial, update, or final.
	 * A non-final request must be submitted to the SPU as an integral
	 * number of blocks. This may leave data at the end of the request
	 * that is not a full block. Since the request is non-final, it cannot
	 * be padded. So, we write the remainder to this hash_carry buffer and
	 * hold it until the next request arrives. The carry data is then
	 * submitted at the beginning of the data in the next SPU msg.
	 * hash_carry_len is the number of bytes currently in hash_carry. These
	 * fields are only used for ahash requests.
	 */
	u8 hash_carry[HASH_CARRY_MAX];
	unsigned int hash_carry_len;
	unsigned int is_final;	/* is this the final for the hash op? */

	/*
	 * Digest from incremental hash is saved here to include in next hash
	 * operation. Cannot be stored in req->result for truncated hashes,
	 * since result may be sized for final digest. Cannot be saved in
	 * msg_buf because that gets deleted between incremental hash ops
	 * and is not saved as part of export().
	 */
	u8 incr_hash[MAX_DIGEST_SIZE];

	/* hmac context */
	bool is_sw_hmac;

	/* aead context */
	struct crypto_tfm *old_tfm;
	crypto_completion_t old_complete;
	void *old_data;

	gfp_t gfp;

	/* Buffers used to build SPU request and response messages */
	struct spu_msg_buf msg_buf;
};
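/*
 * Illustrative sketch, not part of this header: populating the export
 * state from a request context, as an export() implementation might.
 * spu_hash_export_s mirrors exactly these iproc_reqctx_s members, so
 * this is a field-for-field copy. The helper name is hypothetical;
 * assumes memcpy is available (linux/string.h).
 */
static inline void spu_fill_hash_export(struct spu_hash_export_s *out,
					const struct iproc_reqctx_s *rctx)
{
	out->total_todo = rctx->total_todo;
	out->total_sent = rctx->total_sent;
	memcpy(out->hash_carry, rctx->hash_carry, rctx->hash_carry_len);
	out->hash_carry_len = rctx->hash_carry_len;
	memcpy(out->incr_hash, rctx->incr_hash, MAX_DIGEST_SIZE);
	out->is_sw_hmac = rctx->is_sw_hmac;
}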
/*
 * Structure encapsulates a set of function pointers specific to the type of
 * SPU hardware running. These functions handle creation and parsing of
 * SPU request messages and SPU response messages. Includes hardware-specific
 * values read from device tree.
 */
struct spu_hw {
	void (*spu_dump_msg_hdr)(u8 *buf, unsigned int buf_len);
	u32 (*spu_ctx_max_payload)(enum spu_cipher_alg cipher_alg,
				   enum spu_cipher_mode cipher_mode,
				   unsigned int blocksize);
	u32 (*spu_payload_length)(u8 *spu_hdr);
	u16 (*spu_response_hdr_len)(u16 auth_key_len, u16 enc_key_len,
				    bool is_hash);
	u16 (*spu_hash_pad_len)(enum hash_alg hash_alg,
				enum hash_mode hash_mode, u32 chunksize,
				u16 hash_block_size);
	u32 (*spu_gcm_ccm_pad_len)(enum spu_cipher_mode cipher_mode,
				   unsigned int data_size);
	u32 (*spu_assoc_resp_len)(enum spu_cipher_mode cipher_mode,
				  unsigned int assoc_len,
				  unsigned int iv_len, bool is_encrypt);
	u8 (*spu_aead_ivlen)(enum spu_cipher_mode cipher_mode,
			     u16 iv_len);
	enum hash_type (*spu_hash_type)(u32 src_sent);
	u32 (*spu_digest_size)(u32 digest_size, enum hash_alg alg,
			       enum hash_type);
	u32 (*spu_create_request)(u8 *spu_hdr,
				  struct spu_request_opts *req_opts,
				  struct spu_cipher_parms *cipher_parms,
				  struct spu_hash_parms *hash_parms,
				  struct spu_aead_parms *aead_parms,
				  unsigned int data_size);
	u16 (*spu_cipher_req_init)(u8 *spu_hdr,
				   struct spu_cipher_parms *cipher_parms);
	void (*spu_cipher_req_finish)(u8 *spu_hdr,
				      u16 spu_req_hdr_len,
				      unsigned int is_inbound,
				      struct spu_cipher_parms *cipher_parms,
				      unsigned int data_size);
	void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
				u32 hash_pad_len, enum hash_alg auth_alg,
				enum hash_mode auth_mode,
				unsigned int total_sent, u32 status_padding);
	u8 (*spu_xts_tweak_in_payload)(void);
	u8 (*spu_tx_status_len)(void);
	u8 (*spu_rx_status_len)(void);
	int (*spu_status_process)(u8 *statp);
	void (*spu_ccm_update_iv)(unsigned int digestsize,
				  struct spu_cipher_parms *cipher_parms,
				  unsigned int assoclen, unsigned int chunksize,
				  bool is_encrypt, bool is_esp);
	u32 (*spu_wordalign_padlen)(u32 data_size);

	/* The base virtual address of the SPU hw registers */
	void __iomem *reg_vbase[MAX_SPUS];

	/* Version of the SPU hardware */
	enum spu_spu_type spu_type;

	/* Sub-version of the SPU hardware */
	enum spu_spu_subtype spu_subtype;

	/* The number of SPUs on this platform */
	u32 num_spu;

	/* The number of SPU channels on this platform */
	u32 num_chan;
};
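/*
 * Illustrative sketch, not part of this header: request-path code stays
 * hardware-agnostic by dispatching through the spu_hw function pointers,
 * which presumably are bound to the SPU-M or SPU2 implementations when
 * the hardware type is probed (an assumption about where binding occurs).
 * The helper name is hypothetical.
 */
static inline u32 spu_max_payload_for(const struct spu_hw *spu,
				      const struct iproc_ctx_s *ctx,
				      unsigned int blocksize)
{
	/* Same call site works for both hardware generations */
	return spu->spu_ctx_max_payload(ctx->cipher.alg, ctx->cipher.mode,
					blocksize);
}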
struct bcm_device_private {
	struct platform_device *pdev;

	struct spu_hw spu;

	atomic_t session_count;	/* number of streams active */
	atomic_t stream_count;	/* monotonic counter for stream IDs */

	/* Length of BCM header. Set to 0 when hw does not expect BCM header. */
	u8 bcm_hdr_len;

	/* The index of the channel to use for the next crypto request */
	atomic_t next_chan;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Number of request bytes processed and result bytes returned */
	atomic64_t bytes_in;
	atomic64_t bytes_out;

	/* Number of operations of each type */
	atomic_t op_counts[SPU_OP_NUM];

	atomic_t cipher_cnt[CIPHER_ALG_LAST][CIPHER_MODE_LAST];
	atomic_t hash_cnt[HASH_ALG_LAST];
	atomic_t hmac_cnt[HASH_ALG_LAST];
	atomic_t aead_cnt[AEAD_TYPE_LAST];

	/* Number of calls to setkey() for each operation type */
	atomic_t setkey_cnt[SPU_OP_NUM];

	/* Number of times request was resubmitted because mb was full */
	atomic_t mb_no_spc;

	/* Number of mailbox send failures */
	atomic_t mb_send_fail;

	/* Number of ICV check failures for AEAD messages */
	atomic_t bad_icv;

	struct mbox_client mcl;

	/* Array of mailbox channel pointers, one for each channel */
	struct mbox_chan **mbox;
};

extern struct bcm_device_private iproc_priv;

#endif
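/*
 * Illustrative sketch, appended after the listing and not part of
 * cipher.h: a round-robin pick of the mailbox channel for the next
 * request using the next_chan counter, as the comment on that field
 * suggests. The helper name is hypothetical; assumes num_chan is
 * nonzero once the driver is probed.
 */
static inline u8 spu_pick_channel(struct bcm_device_private *priv)
{
	/* Atomic increment distributes requests across channels */
	return (u8)(atomic_inc_return(&priv->next_chan) % priv->spu.num_chan);
}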