cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

queueing.c (2715B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

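/*
 * Allocate one multicore_worker per possible CPU, each with its work item
 * bound to @function and its context pointer set to @ptr. Returns NULL if
 * the per-CPU allocation fails.
 */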
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

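/*
 * Initialize a crypt_queue: a ptr_ring of capacity @len plus the per-CPU
 * workers (allocated above) that drain it by running @function. Returns 0,
 * or a negative errno with nothing left allocated on failure.
 */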
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

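/*
 * Free the per-CPU workers and the ring. Unless @purge is set, the ring is
 * expected to already be empty; with @purge, any skbs still queued are freed
 * along with it.
 */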
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

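/*
 * The prev_queue below is a lock-free multi-producer, single-consumer (MPSC)
 * intrusive linked list. The list link is stashed in skb->prev (aliased as
 * NEXT), and queue->empty serves as a permanent stub element so the list is
 * never without a node.
 */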
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

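/*
 * Start with head and tail both pointing at the stub. The BUILD_BUG_ON
 * verifies that the next/prev fields of queue->empty sit at the same offsets
 * as sk_buff's, so the STUB cast above is legitimate.
 */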
void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
							offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
							 offsetof(struct prev_queue, empty));
}

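/*
 * Producer-side push: terminate the new node, atomically swing head to it
 * (the release on the xchg orders the node's initialization before the
 * swap), then link the old head to the new node. Between the xchg and the
 * WRITE_ONCE the list is momentarily disconnected, which the consumer must
 * tolerate.
 */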
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

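/*
 * Bounded enqueue: reserve a slot against the MAX_QUEUED_PACKETS cap before
 * pushing, and report failure to the caller when the queue is full.
 */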
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

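/*
 * Single-consumer pop. Skips over the stub, re-enqueueing it when the last
 * real node is being removed, and returns NULL either when the queue is
 * empty or when a producer has swung head but not yet linked its node, in
 * which case the caller treats the queue as empty and tries again later.
 */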
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB