cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qat_crypto.h (2658B)


/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _QAT_CRYPTO_INSTANCE_H_
#define _QAT_CRYPTO_INSTANCE_H_

#include <crypto/aes.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "icp_qat_fw_la.h"

struct qat_instance_backlog {
	struct list_head list;
	spinlock_t lock; /* protects backlog list */
};

struct qat_alg_req {
	u32 *fw_req;
	struct adf_etr_ring_data *tx_ring;
	struct crypto_async_request *base;
	struct list_head list;
	struct qat_instance_backlog *backlog;
};
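
/*
 * A sketch of the backlog flow, as implemented in mainline's
 * qat_algs_send.c: when the transport ring is full and the request
 * was submitted with CRYPTO_TFM_REQ_MAY_BACKLOG, the prebuilt
 * firmware message (fw_req) is queued on the instance backlog list
 * under backlog->lock and resubmitted to tx_ring once responses
 * free up ring space.
 */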

struct qat_crypto_instance {
	struct adf_etr_ring_data *sym_tx;
	struct adf_etr_ring_data *sym_rx;
	struct adf_etr_ring_data *pke_tx;
	struct adf_etr_ring_data *pke_rx;
	struct adf_accel_dev *accel_dev;
	struct list_head list;
	unsigned long state;
	int id;
	atomic_t refctr;
	struct qat_instance_backlog backlog;
};
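
/*
 * Typical lifecycle, sketched from the helpers in mainline's
 * qat_crypto.c: an algorithm context grabs an instance (bumping
 * refctr) and releases it when the tfm is torn down, e.g.
 *
 *	struct qat_crypto_instance *inst;
 *
 *	inst = qat_crypto_get_instance_node(numa_node);
 *	...
 *	qat_crypto_put_instance(inst);
 */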

#define QAT_MAX_BUFF_DESC	4

struct qat_alg_buf {
	u32 len;
	u32 resrvd;
	u64 addr;
} __packed;

struct qat_alg_buf_list {
	u64 resrvd;
	u32 num_bufs;
	u32 num_mapped_bufs;
	struct qat_alg_buf bufers[]; /* sic: spelled this way throughout the driver */
} __packed;

struct qat_alg_fixed_buf_list {
	struct qat_alg_buf_list sgl_hdr;
	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);
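
/*
 * A plausible reading of this layout: the fixed-size variant embeds a
 * header plus QAT_MAX_BUFF_DESC descriptors so that requests with at
 * most four scatterlist entries can be described without a heap
 * allocation on the fast path (see sgl_src_valid/sgl_dst_valid below),
 * while the 64-byte alignment keeps the list on a cache-line boundary
 * for DMA.
 */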

struct qat_crypto_request_buffs {
	struct qat_alg_buf_list *bl;
	dma_addr_t blp;
	struct qat_alg_buf_list *blout;
	dma_addr_t bloutp;
	size_t sz;
	size_t sz_out;
	bool sgl_src_valid;
	bool sgl_dst_valid;
	struct qat_alg_fixed_buf_list sgl_src;
	struct qat_alg_fixed_buf_list sgl_dst;
};

struct qat_crypto_request;

struct qat_crypto_request {
	struct icp_qat_fw_la_bulk_req req;
	union {
		struct qat_alg_aead_ctx *aead_ctx;
		struct qat_alg_skcipher_ctx *skcipher_ctx;
	};
	union {
		struct aead_request *aead_req;
		struct skcipher_request *skcipher_req;
	};
	struct qat_crypto_request_buffs buf;
	void (*cb)(struct icp_qat_fw_la_resp *resp,
		   struct qat_crypto_request *req);
	union {
		struct {
			__be64 iv_hi;
			__be64 iv_lo;
		};
		u8 iv[AES_BLOCK_SIZE];
	};
	bool encryption;
	struct qat_alg_req alg_req;
};
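
/*
 * Note on the iv union: iv_hi/iv_lo overlay the 16-byte AES IV so that
 * CTR-mode code can perform 128-bit big-endian counter arithmetic on
 * two __be64 halves rather than byte by byte (see
 * qat_alg_update_iv_ctr_mode() in mainline's qat_algs.c).
 */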

static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	u32 mask = ~hw_device->accel_capabilities_mask;

	if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
		return false;
	if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)
		return false;
	if (mask & ADF_ACCEL_CAPABILITIES_AUTHENTICATION)
		return false;

	return true;
}
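
/*
 * Worked example of the inverted-mask check above: if
 * accel_capabilities_mask has SYMMETRIC and AUTHENTICATION set but
 * ASYMMETRIC clear, then ~mask has the ASYMMETRIC bit set, the second
 * test fires, and the helper returns false. The device only counts as
 * crypto capable when all three capability bits are present.
 */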

static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
{
	return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
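
/*
 * Hypothetical call-site sketch: an allocation on the request path
 * derives its GFP flags from the originating crypto request (skreq is
 * an assumed skcipher_request; ->base is its embedded
 * crypto_async_request):
 *
 *	buf = kzalloc(len, qat_algs_alloc_flags(&skreq->base));
 *
 * Submissions that may sleep get GFP_KERNEL; atomic-context
 * submissions fall back to GFP_ATOMIC.
 */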

#endif