ixp4xx_crypto.c (40824B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/of.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

/* Intermittent includes, delete this after v5.14-rc1 */
#include <linux/soc/ixp4xx/cpu.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define CTL_FLAG_UNUSED         0x0000
#define CTL_FLAG_USED           0x1000
#define CTL_FLAG_PERFORM_ABLK   0x0001
#define CTL_FLAG_GEN_ICV        0x0002
#define CTL_FLAG_GEN_REVAES     0x0004
#define CTL_FLAG_PERFORM_AEAD   0x0008
#define CTL_FLAG_MASK           0x000f

#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

struct buffer_desc {
        u32 phys_next;
#ifdef __ARMEB__
        u16 buf_len;
        u16 pkt_len;
#else
        u16 pkt_len;
        u16 buf_len;
#endif
        dma_addr_t phys_addr;
        u32 __reserved[4];
        struct buffer_desc *next;
        enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
        u8 mode;                /* NPE_OP_*  operation mode */
        u8 init_len;
        u16 reserved;
#else
        u16 reserved;
        u8 init_len;
        u8 mode;                /* NPE_OP_*  operation mode */
#endif
        u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
        dma_addr_t icv_rev_aes; /* icv or rev aes */
        dma_addr_t src_buf;
        dma_addr_t dst_buf;
#ifdef __ARMEB__
        u16 auth_offs;          /* Authentication start offset */
        u16 auth_len;           /* Authentication data length */
        u16 crypt_offs;         /* Cryption start offset */
        u16 crypt_len;          /* Cryption data length */
#else
        u16 auth_len;           /* Authentication data length */
        u16 auth_offs;          /* Authentication start offset */
        u16 crypt_len;          /* Cryption data length */
        u16 crypt_offs;         /* Cryption start offset */
#endif
        u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
        u32 crypto_ctx;         /* NPE Crypto Param structure address */

        /* Used by Host: 4*4 bytes*/
        unsigned int ctl_flags;
        union {
                struct skcipher_request *ablk_req;
                struct aead_request *aead_req;
                struct crypto_tfm *tfm;
        } data;
        struct buffer_desc *regist_buf;
        u8 *regist_ptr;
};
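
/*
 * The NPE coprocessor is big-endian. On little-endian hosts the 8- and
 * 16-bit fields in the two structures above are declared in byte-swapped
 * pairs (see the __ARMEB__ blocks) so the in-memory layout matches what
 * the NPE reads, while 32-bit words are converted explicitly with
 * cpu_to_be32() at the points where they are written.
 */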

struct ablk_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
        u8 iv[MAX_IVLEN];
        bool encrypt;
        struct skcipher_request fallback_req;   // keep at the end
};

struct aead_ctx {
        struct buffer_desc *src;
        struct buffer_desc *dst;
        struct scatterlist ivlist;
        /* used when the hmac is not on one sg entry */
        u8 *hmac_virt;
        int encrypt;
};

struct ix_hash_algo {
        u32 cfgword;
        unsigned char *icv;
};

struct ix_sa_dir {
        unsigned char *npe_ctx;
        dma_addr_t npe_ctx_phys;
        int npe_ctx_idx;
        u8 npe_mode;
};

struct ixp_ctx {
        struct ix_sa_dir encrypt;
        struct ix_sa_dir decrypt;
        int authkey_len;
        u8 authkey[MAX_KEYLEN];
        int enckey_len;
        u8 enckey[MAX_KEYLEN];
        u8 salt[MAX_IVLEN];
        u8 nonce[CTR_RFC3686_NONCE_SIZE];
        unsigned int salted;
        atomic_t configuring;
        struct completion completion;
        struct crypto_skcipher *fallback_tfm;
};

struct ixp_alg {
        struct skcipher_alg crypto;
        const struct ix_hash_algo *hash;
        u32 cfg_enc;
        u32 cfg_dec;

        int registered;
};

struct ixp_aead_alg {
        struct aead_alg crypto;
        const struct ix_hash_algo *hash;
        u32 cfg_enc;
        u32 cfg_dec;

        int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
        .cfgword = 0xAA010004,
        .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
               "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

static const struct ix_hash_algo hash_alg_sha1 = {
        .cfgword = 0x00000005,
        .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
               "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;

static unsigned int send_qid;
static unsigned int recv_qid;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
        return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
        return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
        struct device *dev = &pdev->dev;

        BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
        crypt_virt = dma_alloc_coherent(dev,
                                        NPE_QLEN * sizeof(struct crypt_ctl),
                                        &crypt_phys, GFP_ATOMIC);
        if (!crypt_virt)
                return -ENOMEM;
        return 0;
}

static DEFINE_SPINLOCK(desc_lock);
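
/*
 * Descriptors are handed out from two regions of the coherent block
 * allocated in setup_crypt_desc(): get_crypt_desc() cycles through slots
 * 0..NPE_QLEN-1 for regular requests, and get_crypt_desc_emerg() falls
 * back to slots NPE_QLEN..NPE_QLEN_TOTAL-1 so that key-setup operations
 * (HMAC pad hashing, reverse AES key generation) can still make progress
 * while the normal ring is busy. A minimal sketch (not built) of the
 * invariant the completion path relies on:
 */
#if 0
static void crypt_desc_roundtrip_sketch(void)
{
        struct crypt_ctl *desc = crypt_virt + 5;        /* any valid slot */

        /*
         * crypt_virt2phys() and crypt_phys2virt() are exact inverses over
         * the coherent block, so the physical address the NPE hands back
         * on recv_qid maps to the descriptor that was submitted.
         */
        BUG_ON(crypt_phys2virt(crypt_virt2phys(desc)) != desc);
}
#endif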

static struct crypt_ctl *get_crypt_desc(void)
{
        int i;
        static int idx;
        unsigned long flags;

        spin_lock_irqsave(&desc_lock, flags);

        if (unlikely(!crypt_virt))
                setup_crypt_desc();
        if (unlikely(!crypt_virt)) {
                spin_unlock_irqrestore(&desc_lock, flags);
                return NULL;
        }
        i = idx;
        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
                if (++idx >= NPE_QLEN)
                        idx = 0;
                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
                spin_unlock_irqrestore(&desc_lock, flags);
                return crypt_virt + i;
        } else {
                spin_unlock_irqrestore(&desc_lock, flags);
                return NULL;
        }
}

static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
        int i;
        static int idx = NPE_QLEN;
        struct crypt_ctl *desc;
        unsigned long flags;

        desc = get_crypt_desc();
        if (desc)
                return desc;
        if (unlikely(!crypt_virt))
                return NULL;

        spin_lock_irqsave(&emerg_lock, flags);
        i = idx;
        if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
                if (++idx >= NPE_QLEN_TOTAL)
                        idx = NPE_QLEN;
                crypt_virt[i].ctl_flags = CTL_FLAG_USED;
                spin_unlock_irqrestore(&emerg_lock, flags);
                return crypt_virt + i;
        } else {
                spin_unlock_irqrestore(&emerg_lock, flags);
                return NULL;
        }
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
                           dma_addr_t phys)
{
        while (buf) {
                struct buffer_desc *buf1;
                u32 phys1;

                buf1 = buf->next;
                phys1 = buf->phys_next;
                dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
                dma_pool_free(buffer_pool, buf, phys);
                buf = buf1;
                phys = phys1;
        }
}

static struct tasklet_struct crypto_done_tasklet;
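
/*
 * Completion path: the NPE returns each descriptor's physical address on
 * recv_qid. Bit 0 of the returned address doubles as a status flag (set
 * on authentication failure), which is why one_packet() below turns it
 * into -EBADMSG and masks the low bits off before translating the
 * address back to a virtual descriptor.
 */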

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
        struct aead_request *req = crypt->data.aead_req;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        int decryptlen = req->assoclen + req->cryptlen - authsize;

        if (req_ctx->encrypt) {
                scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
                                         decryptlen, authsize, 1);
        }
        dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
        struct device *dev = &pdev->dev;
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx;
        int failed;

        failed = phys & 0x1 ? -EBADMSG : 0;
        phys &= ~0x3;
        crypt = crypt_phys2virt(phys);

        switch (crypt->ctl_flags & CTL_FLAG_MASK) {
        case CTL_FLAG_PERFORM_AEAD: {
                struct aead_request *req = crypt->data.aead_req;
                struct aead_ctx *req_ctx = aead_request_ctx(req);

                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
                if (req_ctx->hmac_virt)
                        finish_scattered_hmac(crypt);

                req->base.complete(&req->base, failed);
                break;
        }
        case CTL_FLAG_PERFORM_ABLK: {
                struct skcipher_request *req = crypt->data.ablk_req;
                struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
                struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
                unsigned int ivsize = crypto_skcipher_ivsize(tfm);
                unsigned int offset;

                if (ivsize > 0) {
                        offset = req->cryptlen - ivsize;
                        if (req_ctx->encrypt) {
                                scatterwalk_map_and_copy(req->iv, req->dst,
                                                         offset, ivsize, 0);
                        } else {
                                memcpy(req->iv, req_ctx->iv, ivsize);
                                memzero_explicit(req_ctx->iv, ivsize);
                        }
                }

                if (req_ctx->dst)
                        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

                free_buf_chain(dev, req_ctx->src, crypt->src_buf);
                req->base.complete(&req->base, failed);
                break;
        }
        case CTL_FLAG_GEN_ICV:
                ctx = crypto_tfm_ctx(crypt->data.tfm);
                dma_pool_free(ctx_pool, crypt->regist_ptr,
                              crypt->regist_buf->phys_addr);
                dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
                if (atomic_dec_and_test(&ctx->configuring))
                        complete(&ctx->completion);
                break;
        case CTL_FLAG_GEN_REVAES:
                ctx = crypto_tfm_ctx(crypt->data.tfm);
                *(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
                if (atomic_dec_and_test(&ctx->configuring))
                        complete(&ctx->completion);
                break;
        default:
                BUG();
        }
        crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
        tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
        int i;

        for (i = 0; i < 4; i++) {
                dma_addr_t phys = qmgr_get_entry(recv_qid);
                if (!phys)
                        return;
                one_packet(phys);
        }
        tasklet_schedule(&crypto_done_tasklet);
}
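
/*
 * init_ixp_crypto() below resolves the NPE and the RX/TX-ready queues
 * from the device tree. An illustrative node (a hedged sketch, not taken
 * from the binding document) mirroring the non-DT fallback values
 * (npe_id 2, recv_qid 30, send_qid 29) might look like:
 *
 *      crypto {
 *              compatible = "intel,ixp4xx-crypto";
 *              intel,npe-handle = <&npe 2>;
 *              queue-rx = <&qmgr 30>;
 *              queue-txready = <&qmgr 29>;
 *      };
 */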

static int init_ixp_crypto(struct device *dev)
{
        struct device_node *np = dev->of_node;
        u32 msg[2] = { 0, 0 };
        int ret = -ENODEV;
        u32 npe_id;

        dev_info(dev, "probing...\n");

        /* Locate the NPE and queue manager to use from device tree */
        if (IS_ENABLED(CONFIG_OF) && np) {
                struct of_phandle_args queue_spec;
                struct of_phandle_args npe_spec;

                ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
                                                       1, 0, &npe_spec);
                if (ret) {
                        dev_err(dev, "no NPE engine specified\n");
                        return -ENODEV;
                }
                npe_id = npe_spec.args[0];

                ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
                                                       &queue_spec);
                if (ret) {
                        dev_err(dev, "no rx queue phandle\n");
                        return -ENODEV;
                }
                recv_qid = queue_spec.args[0];

                ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
                                                       &queue_spec);
                if (ret) {
                        dev_err(dev, "no txready queue phandle\n");
                        return -ENODEV;
                }
                send_qid = queue_spec.args[0];
        } else {
                /*
                 * Hardcoded engine when using platform data, this goes away
                 * when we switch to using DT only.
                 */
                npe_id = 2;
                send_qid = 29;
                recv_qid = 30;
        }

        npe_c = npe_request(npe_id);
        if (!npe_c)
                return ret;

        if (!npe_running(npe_c)) {
                ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
                if (ret)
                        goto npe_release;
                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;
        } else {
                if (npe_send_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;

                if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
                        goto npe_error;
        }

        switch ((msg[1] >> 16) & 0xff) {
        case 3:
                dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
                support_aes = 0;
                break;
        case 4:
        case 5:
                support_aes = 1;
                break;
        default:
                dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
                ret = -ENODEV;
                goto npe_release;
        }
        /* buffer_pool will also be used to sometimes store the hmac,
         * so ensure it is large enough
         */
        BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
        buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
                                      32, 0);
        ret = -ENOMEM;
        if (!buffer_pool)
                goto err;

        ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
        if (!ctx_pool)
                goto err;

        ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
                                 "ixp_crypto:out", NULL);
        if (ret)
                goto err;
        ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
                                 "ixp_crypto:in", NULL);
        if (ret) {
                qmgr_release_queue(send_qid);
                goto err;
        }
        qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
        tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

        qmgr_enable_irq(recv_qid);
        return 0;

npe_error:
        dev_err(dev, "%s not responding\n", npe_name(npe_c));
        ret = -EIO;
err:
        dma_pool_destroy(ctx_pool);
        dma_pool_destroy(buffer_pool);
npe_release:
        npe_release(npe_c);
        return ret;
}

static void release_ixp_crypto(struct device *dev)
{
        qmgr_disable_irq(recv_qid);
        tasklet_kill(&crypto_done_tasklet);

        qmgr_release_queue(send_qid);
        qmgr_release_queue(recv_qid);

        dma_pool_destroy(ctx_pool);
        dma_pool_destroy(buffer_pool);

        npe_release(npe_c);

        if (crypt_virt)
                dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
                                  crypt_virt, crypt_phys);
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
        dir->npe_ctx_idx = 0;
        dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
        dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
        if (!dir->npe_ctx)
                return -ENOMEM;

        reset_sa_dir(dir);
        return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
        memset(dir->npe_ctx, 0, NPE_CTX_LEN);
        dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        atomic_set(&ctx->configuring, 0);
        ret = init_sa_dir(&ctx->encrypt);
        if (ret)
                return ret;
        ret = init_sa_dir(&ctx->decrypt);
        if (ret)
                free_sa_dir(&ctx->encrypt);

        return ret;
}
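
/*
 * The hardware path in ablk_perform() only handles one scatterlist entry
 * per direction; anything more fragmented is bounced to a software
 * fallback skcipher, which init_tfm_ablk() below allocates per tfm.
 */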

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
        struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
        struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
        const char *name = crypto_tfm_alg_name(ctfm);

        ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback_tfm)) {
                pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
                       name, PTR_ERR(ctx->fallback_tfm));
                return PTR_ERR(ctx->fallback_tfm);
        }

        pr_info("Fallback for %s is %s\n",
                crypto_tfm_alg_driver_name(&tfm->base),
                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
                );

        crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
        return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
        return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

        free_sa_dir(&ctx->encrypt);
        free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
        struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
        struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);

        crypto_free_skcipher(ctx->fallback_tfm);
        exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
        exit_tfm(crypto_aead_tfm(tfm));
}

static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
                              int init_len, u32 ctx_addr, const u8 *key,
                              int key_len)
{
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypt_ctl *crypt;
        struct buffer_desc *buf;
        int i;
        u8 *pad;
        dma_addr_t pad_phys, buf_phys;

        BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
        pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
        if (!pad)
                return -ENOMEM;
        buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
        if (!buf) {
                dma_pool_free(ctx_pool, pad, pad_phys);
                return -ENOMEM;
        }
        crypt = get_crypt_desc_emerg();
        if (!crypt) {
                dma_pool_free(ctx_pool, pad, pad_phys);
                dma_pool_free(buffer_pool, buf, buf_phys);
                return -EAGAIN;
        }

        memcpy(pad, key, key_len);
        memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
        for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
                pad[i] ^= xpad;

        crypt->data.tfm = tfm;
        crypt->regist_ptr = pad;
        crypt->regist_buf = buf;

        crypt->auth_offs = 0;
        crypt->auth_len = HMAC_PAD_BLOCKLEN;
        crypt->crypto_ctx = ctx_addr;
        crypt->src_buf = buf_phys;
        crypt->icv_rev_aes = target;
        crypt->mode = NPE_OP_HASH_GEN_ICV;
        crypt->init_len = init_len;
        crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

        buf->next = 0;
        buf->buf_len = HMAC_PAD_BLOCKLEN;
        buf->pkt_len = 0;
        buf->phys_addr = pad_phys;

        atomic_inc(&ctx->configuring);
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return 0;
}
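
/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). register_chain_var()
 * above has the NPE hash one padded key block (K ^ ipad or K ^ opad) and
 * store the resulting chaining variables at 'target' inside the
 * per-direction NPE context, so per-request hashing starts from the
 * precomputed inner/outer state instead of rehashing the key each time.
 * setup_auth() below writes the config word and initial ICV, then queues
 * both pad registrations.
 */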

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
                      const u8 *key, int key_len, unsigned int digest_len)
{
        u32 itarget, otarget, npe_ctx_addr;
        unsigned char *cinfo;
        int init_len, ret = 0;
        u32 cfgword;
        struct ix_sa_dir *dir;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        const struct ix_hash_algo *algo;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        cinfo = dir->npe_ctx + dir->npe_ctx_idx;
        algo = ix_hash(tfm);

        /* write cfg word to cryptinfo */
        cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
        cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
        *(u32 *)cinfo = cpu_to_be32(cfgword);
        cinfo += sizeof(cfgword);

        /* write ICV to cryptinfo */
        memcpy(cinfo, algo->icv, digest_len);
        cinfo += digest_len;

        itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
                  + sizeof(algo->cfgword);
        otarget = itarget + digest_len;
        init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
        npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

        dir->npe_ctx_idx += init_len;
        dir->npe_mode |= NPE_OP_HASH_ENABLE;

        if (!encrypt)
                dir->npe_mode |= NPE_OP_HASH_VERIFY;

        ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
                                 init_len, npe_ctx_addr, key, key_len);
        if (ret)
                return ret;
        return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
                                  init_len, npe_ctx_addr, key, key_len);
}

static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ix_sa_dir *dir = &ctx->decrypt;

        crypt = get_crypt_desc_emerg();
        if (!crypt)
                return -EAGAIN;

        *(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

        crypt->data.tfm = tfm;
        crypt->crypt_offs = 0;
        crypt->crypt_len = AES_BLOCK128;
        crypt->src_buf = 0;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
        crypt->mode = NPE_OP_ENC_GEN_KEY;
        crypt->init_len = dir->npe_ctx_idx;
        crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

        atomic_inc(&ctx->configuring);
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return 0;
}
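
/*
 * AES decryption needs the expanded key schedule in reverse order. To
 * obtain it, gen_rev_aes_key() above temporarily marks the decrypt
 * context CIPH_ENCR and submits an NPE_OP_ENC_GEN_KEY operation; the NPE
 * expands the key and deposits the reverse schedule at icv_rev_aes (just
 * past the config word). The CTL_FLAG_GEN_REVAES completion case in
 * one_packet() then clears the CIPH_ENCR bit again.
 */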

static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
                        int key_len)
{
        u8 *cinfo;
        u32 cipher_cfg;
        u32 keylen_cfg = 0;
        struct ix_sa_dir *dir;
        struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        cinfo = dir->npe_ctx;

        if (encrypt) {
                cipher_cfg = cipher_cfg_enc(tfm);
                dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
        } else {
                cipher_cfg = cipher_cfg_dec(tfm);
        }
        if (cipher_cfg & MOD_AES) {
                switch (key_len) {
                case 16:
                        keylen_cfg = MOD_AES128;
                        break;
                case 24:
                        keylen_cfg = MOD_AES192;
                        break;
                case 32:
                        keylen_cfg = MOD_AES256;
                        break;
                default:
                        return -EINVAL;
                }
                cipher_cfg |= keylen_cfg;
        } else {
                err = crypto_des_verify_key(tfm, key);
                if (err)
                        return err;
        }
        /* write cfg word to cryptinfo */
        *(u32 *)cinfo = cpu_to_be32(cipher_cfg);
        cinfo += sizeof(cipher_cfg);

        /* write cipher key to cryptinfo */
        memcpy(cinfo, key, key_len);
        /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
        if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
                memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
                key_len = DES3_EDE_KEY_SIZE;
        }
        dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
        dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
        if ((cipher_cfg & MOD_AES) && !encrypt)
                return gen_rev_aes_key(tfm);

        return 0;
}

static struct buffer_desc *chainup_buffers(struct device *dev,
                                           struct scatterlist *sg, unsigned int nbytes,
                                           struct buffer_desc *buf, gfp_t flags,
                                           enum dma_data_direction dir)
{
        for (; nbytes > 0; sg = sg_next(sg)) {
                unsigned int len = min(nbytes, sg->length);
                struct buffer_desc *next_buf;
                dma_addr_t next_buf_phys;
                void *ptr;

                nbytes -= len;
                ptr = sg_virt(sg);
                next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
                if (!next_buf) {
                        buf = NULL;
                        break;
                }
                sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
                buf->next = next_buf;
                buf->phys_next = next_buf_phys;
                buf = next_buf;

                buf->phys_addr = sg_dma_address(sg);
                buf->buf_len = len;
                buf->dir = dir;
        }
        buf->next = NULL;
        buf->phys_next = 0;
        return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
                       unsigned int key_len)
{
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        init_completion(&ctx->completion);
        atomic_inc(&ctx->configuring);

        reset_sa_dir(&ctx->encrypt);
        reset_sa_dir(&ctx->decrypt);

        ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
        ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

        ret = setup_cipher(&tfm->base, 0, key, key_len);
        if (ret)
                goto out;
        ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
        if (!atomic_dec_and_test(&ctx->configuring))
                wait_for_completion(&ctx->completion);
        if (ret)
                return ret;
        crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int key_len)
{
        return verify_skcipher_des3_key(tfm, key) ?:
               ablk_setkey(tfm, key, key_len);
}
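
/*
 * For rfc3686(ctr(aes)), the trailing four bytes of the key blob are a
 * per-SA nonce rather than AES key material. ablk_rfc3686_crypt() later
 * assembles the 16-byte CTR counter block as
 *
 *      nonce (4 bytes) || per-request IV (8 bytes) || counter = 1 (4 bytes)
 *
 * as specified by RFC 3686.
 */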

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
                               unsigned int key_len)
{
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* the nonce is stored at the end of the key */
        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;

        memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        key_len -= CTR_RFC3686_NONCE_SIZE;
        return ablk_setkey(tfm, key, key_len);
}

static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
        struct ablk_ctx *rctx = skcipher_request_ctx(areq);
        int err;

        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        if (encrypt)
                err = crypto_skcipher_encrypt(&rctx->fallback_req);
        else
                err = crypto_skcipher_decrypt(&rctx->fallback_req);
        return err;
}

static int ablk_perform(struct skcipher_request *req, int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct ix_sa_dir *dir;
        struct crypt_ctl *crypt;
        unsigned int nbytes = req->cryptlen;
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
        struct buffer_desc src_hook;
        struct device *dev = &pdev->dev;
        unsigned int offset;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

        if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
                return ixp4xx_cipher_fallback(req, encrypt);

        if (qmgr_stat_full(send_qid))
                return -EAGAIN;
        if (atomic_read(&ctx->configuring))
                return -EAGAIN;

        dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
        req_ctx->encrypt = encrypt;

        crypt = get_crypt_desc();
        if (!crypt)
                return -ENOMEM;

        crypt->data.ablk_req = req;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->mode = dir->npe_mode;
        crypt->init_len = dir->npe_ctx_idx;

        crypt->crypt_offs = 0;
        crypt->crypt_len = nbytes;

        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);
        if (ivsize > 0 && !encrypt) {
                offset = req->cryptlen - ivsize;
                scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
        }
        if (req->src != req->dst) {
                struct buffer_desc dst_hook;

                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
                req_ctx->dst = NULL;
                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                     flags, DMA_FROM_DEVICE))
                        goto free_buf_dest;
                src_direction = DMA_TO_DEVICE;
                req_ctx->dst = dst_hook.next;
                crypt->dst_buf = dst_hook.phys_next;
        } else {
                req_ctx->dst = NULL;
        }
        req_ctx->src = NULL;
        if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
                             src_direction))
                goto free_buf_src;

        req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return -EINPROGRESS;

free_buf_src:
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
        if (req->src != req->dst)
                free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
}

static int ablk_encrypt(struct skcipher_request *req)
{
        return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
        return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
        u8 iv[CTR_RFC3686_BLOCK_SIZE];
        u8 *info = req->iv;
        int ret;

        /* set up counter block */
        memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

        /* initialize counter portion of counter block */
        *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
                cpu_to_be32(1);

        req->iv = iv;
        ret = ablk_perform(req, 1);
        req->iv = info;
        return ret;
}
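
/*
 * aead_perform() below authenticates the associated data plus payload
 * (auth_len = assoclen + cryptlen) while en/decrypting only the payload
 * range. The ICV is normally addressed in place at the tail of the last
 * buffer; if it straddles scatterlist entries, a bounce buffer from
 * buffer_pool (hmac_virt) is used instead and reconciled in
 * finish_scattered_hmac().
 */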

static int aead_perform(struct aead_request *req, int encrypt,
                        int cryptoffset, int eff_cryptlen, u8 *iv)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int ivsize = crypto_aead_ivsize(tfm);
        unsigned int authsize = crypto_aead_authsize(tfm);
        struct ix_sa_dir *dir;
        struct crypt_ctl *crypt;
        unsigned int cryptlen;
        struct buffer_desc *buf, src_hook;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
        struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        unsigned int lastlen;

        if (qmgr_stat_full(send_qid))
                return -EAGAIN;
        if (atomic_read(&ctx->configuring))
                return -EAGAIN;

        if (encrypt) {
                dir = &ctx->encrypt;
                cryptlen = req->cryptlen;
        } else {
                dir = &ctx->decrypt;
                /* req->cryptlen includes the authsize when decrypting */
                cryptlen = req->cryptlen - authsize;
                eff_cryptlen -= authsize;
        }
        crypt = get_crypt_desc();
        if (!crypt)
                return -ENOMEM;

        crypt->data.aead_req = req;
        crypt->crypto_ctx = dir->npe_ctx_phys;
        crypt->mode = dir->npe_mode;
        crypt->init_len = dir->npe_ctx_idx;

        crypt->crypt_offs = cryptoffset;
        crypt->crypt_len = eff_cryptlen;

        crypt->auth_offs = 0;
        crypt->auth_len = req->assoclen + cryptlen;
        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);

        buf = chainup_buffers(dev, req->src, crypt->auth_len,
                              &src_hook, flags, src_direction);
        req_ctx->src = src_hook.next;
        crypt->src_buf = src_hook.phys_next;
        if (!buf)
                goto free_buf_src;

        lastlen = buf->buf_len;
        if (lastlen >= authsize)
                crypt->icv_rev_aes = buf->phys_addr +
                                     buf->buf_len - authsize;

        req_ctx->dst = NULL;

        if (req->src != req->dst) {
                struct buffer_desc dst_hook;

                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                src_direction = DMA_TO_DEVICE;

                buf = chainup_buffers(dev, req->dst, crypt->auth_len,
                                      &dst_hook, flags, DMA_FROM_DEVICE);
                req_ctx->dst = dst_hook.next;
                crypt->dst_buf = dst_hook.phys_next;

                if (!buf)
                        goto free_buf_dst;

                if (encrypt) {
                        lastlen = buf->buf_len;
                        if (lastlen >= authsize)
                                crypt->icv_rev_aes = buf->phys_addr +
                                                     buf->buf_len - authsize;
                }
        }

        if (unlikely(lastlen < authsize)) {
                /* The 12 hmac bytes are scattered,
                 * we need to copy them into a safe buffer */
                req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
                                                    &crypt->icv_rev_aes);
                if (unlikely(!req_ctx->hmac_virt))
                        goto free_buf_dst;
                if (!encrypt) {
                        scatterwalk_map_and_copy(req_ctx->hmac_virt,
                                                 req->src, cryptlen, authsize, 0);
                }
                req_ctx->encrypt = encrypt;
        } else {
                req_ctx->hmac_virt = NULL;
        }

        crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
        qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
        BUG_ON(qmgr_stat_overflow(send_qid));
        return -EINPROGRESS;

free_buf_dst:
        free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
        free_buf_chain(dev, req_ctx->src, crypt->src_buf);
        crypt->ctl_flags = CTL_FLAG_UNUSED;
        return -ENOMEM;
}
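
/*
 * Key and authsize changes are asynchronous: setup_cipher()/setup_auth()
 * queue configuration descriptors to the NPE and bump ctx->configuring,
 * which the completion cases in one_packet() decrement. aead_setup()
 * below, like ablk_setkey() above, waits on ctx->completion until the
 * last configuration descriptor has returned, while the request paths
 * return -EAGAIN as long as configuring is nonzero.
 */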

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int digest_len = crypto_aead_maxauthsize(tfm);
        int ret;

        if (!ctx->enckey_len && !ctx->authkey_len)
                return 0;
        init_completion(&ctx->completion);
        atomic_inc(&ctx->configuring);

        reset_sa_dir(&ctx->encrypt);
        reset_sa_dir(&ctx->decrypt);

        ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
        if (ret)
                goto out;
        ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
        if (ret)
                goto out;
        ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
                         ctx->authkey_len, digest_len);
        if (ret)
                goto out;
        ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
                         ctx->authkey_len, digest_len);
out:
        if (!atomic_dec_and_test(&ctx->configuring))
                wait_for_completion(&ctx->completion);
        return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        int max = crypto_aead_maxauthsize(tfm) >> 2;

        if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
                return -EINVAL;
        return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
                       unsigned int keylen)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        if (keys.authkeylen > sizeof(ctx->authkey))
                goto badkey;

        if (keys.enckeylen > sizeof(ctx->enckey))
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkey_len = keys.authkeylen;
        ctx->enckey_len = keys.enckeylen;

        memzero_explicit(&keys, sizeof(keys));
        return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                goto badkey;

        err = -EINVAL;
        if (keys.authkeylen > sizeof(ctx->authkey))
                goto badkey;

        err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
        if (err)
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkey_len = keys.authkeylen;
        ctx->enckey_len = keys.enckeylen;

        memzero_explicit(&keys, sizeof(keys));
        return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
        memzero_explicit(&keys, sizeof(keys));
        return err;
}

static int aead_encrypt(struct aead_request *req)
{
        return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
        return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
        .crypto = {
                .base.cra_name      = "cbc(des)",
                .base.cra_blocksize = DES_BLOCK_SIZE,

                .min_keysize        = DES_KEY_SIZE,
                .max_keysize        = DES_KEY_SIZE,
                .ivsize             = DES_BLOCK_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
        .crypto = {
                .base.cra_name      = "ecb(des)",
                .base.cra_blocksize = DES_BLOCK_SIZE,
                .min_keysize        = DES_KEY_SIZE,
                .max_keysize        = DES_KEY_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
        .crypto = {
                .base.cra_name      = "cbc(des3_ede)",
                .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,

                .min_keysize        = DES3_EDE_KEY_SIZE,
                .max_keysize        = DES3_EDE_KEY_SIZE,
                .ivsize             = DES3_EDE_BLOCK_SIZE,
                .setkey             = ablk_des3_setkey,
        },
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base.cra_name      = "ecb(des3_ede)",
                .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,

                .min_keysize        = DES3_EDE_KEY_SIZE,
                .max_keysize        = DES3_EDE_KEY_SIZE,
                .setkey             = ablk_des3_setkey,
        },
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
        .crypto = {
                .base.cra_name      = "cbc(aes)",
                .base.cra_blocksize = AES_BLOCK_SIZE,

                .min_keysize        = AES_MIN_KEY_SIZE,
                .max_keysize        = AES_MAX_KEY_SIZE,
                .ivsize             = AES_BLOCK_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
        .crypto = {
                .base.cra_name      = "ecb(aes)",
                .base.cra_blocksize = AES_BLOCK_SIZE,

                .min_keysize        = AES_MIN_KEY_SIZE,
                .max_keysize        = AES_MAX_KEY_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
        .crypto = {
                .base.cra_name      = "ctr(aes)",
                .base.cra_blocksize = 1,

                .min_keysize        = AES_MIN_KEY_SIZE,
                .max_keysize        = AES_MAX_KEY_SIZE,
                .ivsize             = AES_BLOCK_SIZE,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
        .crypto = {
                .base.cra_name      = "rfc3686(ctr(aes))",
                .base.cra_blocksize = 1,

                .min_keysize        = AES_MIN_KEY_SIZE,
                .max_keysize        = AES_MAX_KEY_SIZE,
                .ivsize             = AES_BLOCK_SIZE,
                .setkey             = ablk_rfc3686_setkey,
                .encrypt            = ablk_rfc3686_crypt,
                .decrypt            = ablk_rfc3686_crypt,
        },
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
        .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
        .crypto = {
                .base = {
                        .cra_name      = "authenc(hmac(md5),cbc(des))",
                        .cra_blocksize = DES_BLOCK_SIZE,
                },
                .ivsize      = DES_BLOCK_SIZE,
                .maxauthsize = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name      = "authenc(hmac(md5),cbc(des3_ede))",
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                },
                .ivsize      = DES3_EDE_BLOCK_SIZE,
                .maxauthsize = MD5_DIGEST_SIZE,
                .setkey      = des3_aead_setkey,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name      = "authenc(hmac(sha1),cbc(des))",
                        .cra_blocksize = DES_BLOCK_SIZE,
                },
                .ivsize      = DES_BLOCK_SIZE,
                .maxauthsize = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name      = "authenc(hmac(sha1),cbc(des3_ede))",
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                },
                .ivsize      = DES3_EDE_BLOCK_SIZE,
                .maxauthsize = SHA1_DIGEST_SIZE,
                .setkey      = des3_aead_setkey,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
        .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
        .crypto = {
                .base = {
                        .cra_name      = "authenc(hmac(md5),cbc(aes))",
                        .cra_blocksize = AES_BLOCK_SIZE,
                },
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = MD5_DIGEST_SIZE,
        },
        .hash = &hash_alg_md5,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
        .crypto = {
                .base = {
                        .cra_name      = "authenc(hmac(sha1),cbc(aes))",
                        .cra_blocksize = AES_BLOCK_SIZE,
                },
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = SHA1_DIGEST_SIZE,
        },
        .hash = &hash_alg_sha1,
        .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
        .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static int ixp_crypto_probe(struct platform_device *_pdev)
{
        struct device *dev = &_pdev->dev;
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i, err;

        pdev = _pdev;

        err = init_ixp_crypto(dev);
        if (err)
                return err;

        for (i = 0; i < num; i++) {
                struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

                if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s"IXP_POSTFIX, cra->base.cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        continue;
                if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
                        continue;

                /* block ciphers */
                cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ASYNC |
                                      CRYPTO_ALG_ALLOCATES_MEMORY |
                                      CRYPTO_ALG_NEED_FALLBACK;
                if (!cra->setkey)
                        cra->setkey = ablk_setkey;
                if (!cra->encrypt)
                        cra->encrypt = ablk_encrypt;
                if (!cra->decrypt)
                        cra->decrypt = ablk_decrypt;
                cra->init = init_tfm_ablk;
                cra->exit = exit_tfm_ablk;

                cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
                cra->base.cra_module = THIS_MODULE;
                cra->base.cra_alignmask = 3;
                cra->base.cra_priority = 300;
                if (crypto_register_skcipher(cra))
                        dev_err(&pdev->dev, "Failed to register '%s'\n",
                                cra->base.cra_name);
                else
                        ixp4xx_algos[i].registered = 1;
        }

        for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
                struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

                if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s"IXP_POSTFIX, cra->base.cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        continue;
                if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
                        continue;

                /* authenc */
                cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                      CRYPTO_ALG_ASYNC |
                                      CRYPTO_ALG_ALLOCATES_MEMORY;
                cra->setkey = cra->setkey ?: aead_setkey;
                cra->setauthsize = aead_setauthsize;
                cra->encrypt = aead_encrypt;
                cra->decrypt = aead_decrypt;
                cra->init = init_tfm_aead;
                cra->exit = exit_tfm_aead;

                cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
                cra->base.cra_module = THIS_MODULE;
                cra->base.cra_alignmask = 3;
                cra->base.cra_priority = 300;

                if (crypto_register_aead(cra))
                        dev_err(&pdev->dev, "Failed to register '%s'\n",
                                cra->base.cra_driver_name);
                else
                        ixp4xx_aeads[i].registered = 1;
        }
        return 0;
}

static int ixp_crypto_remove(struct platform_device *pdev)
{
        int num = ARRAY_SIZE(ixp4xx_algos);
        int i;

        for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
                if (ixp4xx_aeads[i].registered)
                        crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
        }

        for (i = 0; i < num; i++) {
                if (ixp4xx_algos[i].registered)
                        crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
        }
        release_ixp_crypto(&pdev->dev);

        return 0;
}
"intel,ixp4xx-crypto", 1584 }, 1585 {}, 1586}; 1587 1588static struct platform_driver ixp_crypto_driver = { 1589 .probe = ixp_crypto_probe, 1590 .remove = ixp_crypto_remove, 1591 .driver = { 1592 .name = "ixp4xx_crypto", 1593 .of_match_table = ixp4xx_crypto_of_match, 1594 }, 1595}; 1596module_platform_driver(ixp_crypto_driver); 1597 1598MODULE_LICENSE("GPL"); 1599MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); 1600MODULE_DESCRIPTION("IXP4xx hardware crypto"); 1601