cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qat_algs_send.c (2080B)


// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"

#define ADF_MAX_RETRIES		20

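/*
 * Send a firmware request to the transport ring, busy-retrying up to
 * ADF_MAX_RETRIES times while the ring reports -EAGAIN (ring full).
 * Returns -ENOSPC if the ring is still full after all retries, otherwise
 * -EINPROGRESS to signal that the request was submitted and will
 * complete asynchronously.
 */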
static int qat_alg_send_message_retry(struct qat_alg_req *req)
{
	int ret = 0, ctr = 0;

	do {
		ret = adf_send_message(req->tx_ring, req->fw_req);
	} while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);

	if (ret == -EAGAIN)
		return -ENOSPC;

	return -EINPROGRESS;
}

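/*
 * Resubmit backlogged requests to the transport ring in FIFO order.
 * Each request that makes it onto the ring is removed from the backlog
 * and completed with -EINPROGRESS; the loop stops at the first request
 * the ring cannot accept, leaving it and everything behind it queued.
 */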
void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
{
	struct qat_alg_req *req, *tmp;

	spin_lock_bh(&backlog->lock);
	list_for_each_entry_safe(req, tmp, &backlog->list, list) {
		if (adf_send_message(req->tx_ring, req->fw_req)) {
			/* The HW ring is full. Do nothing.
			 * qat_alg_send_backlog() will be invoked again by
			 * another callback.
			 */
			break;
		}
		list_del(&req->list);
		req->base->complete(req->base, -EINPROGRESS);
	}
	spin_unlock_bh(&backlog->lock);
}

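/* Append a request to the instance backlog under the backlog lock. */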
static void qat_alg_backlog_req(struct qat_alg_req *req,
				struct qat_instance_backlog *backlog)
{
	INIT_LIST_HEAD(&req->list);

	spin_lock_bh(&backlog->lock);
	list_add_tail(&req->list, &backlog->list);
	spin_unlock_bh(&backlog->lock);
}

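/*
 * Fast path: try to place the request directly on the HW ring. The
 * request is backlogged instead when other requests are already waiting
 * (preserving submission order), when the ring is nearly full, or when
 * the send itself fails. Returns -EINPROGRESS if the request was put on
 * the ring, -EBUSY if it was backlogged.
 */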
static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
{
	struct qat_instance_backlog *backlog = req->backlog;
	struct adf_etr_ring_data *tx_ring = req->tx_ring;
	u32 *fw_req = req->fw_req;

	/* If any request is already backlogged, then add to backlog list */
	if (!list_empty(&backlog->list))
		goto enqueue;

	/* If ring is nearly full, then add to backlog list */
	if (adf_ring_nearly_full(tx_ring))
		goto enqueue;

	/* If adding request to HW ring fails, then add to backlog list */
	if (adf_send_message(tx_ring, fw_req))
		goto enqueue;

	return -EINPROGRESS;

enqueue:
	qat_alg_backlog_req(req, backlog);

	return -EBUSY;
}

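/*
 * Entry point for submitting a crypto request to the firmware: requests
 * flagged CRYPTO_TFM_REQ_MAY_BACKLOG may be queued on the instance
 * backlog when the ring is busy; all others are retried synchronously.
 */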
int qat_alg_send_message(struct qat_alg_req *req)
{
	u32 flags = req->base->flags;

	if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		return qat_alg_send_message_maybacklog(req);
	else
		return qat_alg_send_message_retry(req);
}
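
For context, a submission path might drive this file along the following lines. This is a minimal sketch, not code from this tree: qat_example_send() is an illustrative name, and the struct qat_alg_req and struct qat_crypto_instance field layouts are assumed from qat_crypto.h.

/*
 * Hypothetical caller, for illustration only; qat_example_send() is not
 * part of the file above. The request struct must be owned by the caller
 * and live until completion, since qat_alg_send_message() may link it
 * into the instance backlog via req->list.
 */
#include "qat_algs_send.h"
#include "qat_crypto.h"

static int qat_example_send(struct qat_alg_req *alg_req,
			    struct qat_crypto_instance *inst,
			    u32 *fw_req,
			    struct crypto_async_request *base)
{
	alg_req->fw_req = fw_req;		/* firmware request descriptor */
	alg_req->tx_ring = inst->sym_tx;	/* symmetric-crypto TX ring */
	alg_req->base = base;			/* async completion handle */
	alg_req->backlog = &inst->backlog;	/* per-instance backlog */

	/*
	 * -EINPROGRESS: placed on the HW ring, completes asynchronously.
	 * -EBUSY:       backlogged, resubmitted by qat_alg_send_backlog().
	 * -ENOSPC:      ring still full after retries, request not queued.
	 */
	return qat_alg_send_message(alg_req);
}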