cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

io_uring-cp.c (5334B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Simple test program that demonstrates a file copy through io_uring. This
      4 * uses the API exposed by liburing.
      5 *
      6 * Copyright (C) 2018-2019 Jens Axboe
      7 */
      8#include <stdio.h>
      9#include <fcntl.h>
     10#include <string.h>
     11#include <stdlib.h>
     12#include <unistd.h>
     13#include <assert.h>
     14#include <errno.h>
     15#include <inttypes.h>
     16#include <sys/types.h>
     17#include <sys/stat.h>
     18#include <sys/ioctl.h>
     19
     20#include "liburing.h"
     21
     22#define QD	64
     23#define BS	(32*1024)
     24
     25static int infd, outfd;
     26
/*
 * Per-request state, allocated together with its data buffer: the buffer
 * lives immediately after this struct (see queue_read: iov_base = data + 1).
 * The same allocation is reused for the read and the matching write.
 */
struct io_data {
	int read;			/* 1 = read from infd, 0 = write to outfd */
	off_t first_offset, offset;	/* original offset, and current offset after short-I/O requeues */
	size_t first_len;		/* original request length, before short-I/O adjustment */
	struct iovec iov;		/* single iovec pointing at the inline buffer */
};
     33
     34static int setup_context(unsigned entries, struct io_uring *ring)
     35{
     36	int ret;
     37
     38	ret = io_uring_queue_init(entries, ring, 0);
     39	if (ret < 0) {
     40		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
     41		return -1;
     42	}
     43
     44	return 0;
     45}
     46
     47static int get_file_size(int fd, off_t *size)
     48{
     49	struct stat st;
     50
     51	if (fstat(fd, &st) < 0)
     52		return -1;
     53	if (S_ISREG(st.st_mode)) {
     54		*size = st.st_size;
     55		return 0;
     56	} else if (S_ISBLK(st.st_mode)) {
     57		unsigned long long bytes;
     58
     59		if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
     60			return -1;
     61
     62		*size = bytes;
     63		return 0;
     64	}
     65
     66	return -1;
     67}
     68
     69static void queue_prepped(struct io_uring *ring, struct io_data *data)
     70{
     71	struct io_uring_sqe *sqe;
     72
     73	sqe = io_uring_get_sqe(ring);
     74	assert(sqe);
     75
     76	if (data->read)
     77		io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
     78	else
     79		io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
     80
     81	io_uring_sqe_set_data(sqe, data);
     82}
     83
     84static int queue_read(struct io_uring *ring, off_t size, off_t offset)
     85{
     86	struct io_uring_sqe *sqe;
     87	struct io_data *data;
     88
     89	data = malloc(size + sizeof(*data));
     90	if (!data)
     91		return 1;
     92
     93	sqe = io_uring_get_sqe(ring);
     94	if (!sqe) {
     95		free(data);
     96		return 1;
     97	}
     98
     99	data->read = 1;
    100	data->offset = data->first_offset = offset;
    101
    102	data->iov.iov_base = data + 1;
    103	data->iov.iov_len = size;
    104	data->first_len = size;
    105
    106	io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
    107	io_uring_sqe_set_data(sqe, data);
    108	return 0;
    109}
    110
    111static void queue_write(struct io_uring *ring, struct io_data *data)
    112{
    113	data->read = 0;
    114	data->offset = data->first_offset;
    115
    116	data->iov.iov_base = data + 1;
    117	data->iov.iov_len = data->first_len;
    118
    119	queue_prepped(ring, data);
    120	io_uring_submit(ring);
    121}
    122
    123static int copy_file(struct io_uring *ring, off_t insize)
    124{
    125	unsigned long reads, writes;
    126	struct io_uring_cqe *cqe;
    127	off_t write_left, offset;
    128	int ret;
    129
    130	write_left = insize;
    131	writes = reads = offset = 0;
    132
    133	while (insize || write_left) {
    134		int had_reads, got_comp;
    135
    136		/*
    137		 * Queue up as many reads as we can
    138		 */
    139		had_reads = reads;
    140		while (insize) {
    141			off_t this_size = insize;
    142
    143			if (reads + writes >= QD)
    144				break;
    145			if (this_size > BS)
    146				this_size = BS;
    147			else if (!this_size)
    148				break;
    149
    150			if (queue_read(ring, this_size, offset))
    151				break;
    152
    153			insize -= this_size;
    154			offset += this_size;
    155			reads++;
    156		}
    157
    158		if (had_reads != reads) {
    159			ret = io_uring_submit(ring);
    160			if (ret < 0) {
    161				fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
    162				break;
    163			}
    164		}
    165
    166		/*
    167		 * Queue is full at this point. Find at least one completion.
    168		 */
    169		got_comp = 0;
    170		while (write_left) {
    171			struct io_data *data;
    172
    173			if (!got_comp) {
    174				ret = io_uring_wait_cqe(ring, &cqe);
    175				got_comp = 1;
    176			} else {
    177				ret = io_uring_peek_cqe(ring, &cqe);
    178				if (ret == -EAGAIN) {
    179					cqe = NULL;
    180					ret = 0;
    181				}
    182			}
    183			if (ret < 0) {
    184				fprintf(stderr, "io_uring_peek_cqe: %s\n",
    185							strerror(-ret));
    186				return 1;
    187			}
    188			if (!cqe)
    189				break;
    190
    191			data = io_uring_cqe_get_data(cqe);
    192			if (cqe->res < 0) {
    193				if (cqe->res == -EAGAIN) {
    194					queue_prepped(ring, data);
    195					io_uring_cqe_seen(ring, cqe);
    196					continue;
    197				}
    198				fprintf(stderr, "cqe failed: %s\n",
    199						strerror(-cqe->res));
    200				return 1;
    201			} else if (cqe->res != data->iov.iov_len) {
    202				/* Short read/write, adjust and requeue */
    203				data->iov.iov_base += cqe->res;
    204				data->iov.iov_len -= cqe->res;
    205				data->offset += cqe->res;
    206				queue_prepped(ring, data);
    207				io_uring_cqe_seen(ring, cqe);
    208				continue;
    209			}
    210
    211			/*
    212			 * All done. if write, nothing else to do. if read,
    213			 * queue up corresponding write.
    214			 */
    215			if (data->read) {
    216				queue_write(ring, data);
    217				write_left -= data->first_len;
    218				reads--;
    219				writes++;
    220			} else {
    221				free(data);
    222				writes--;
    223			}
    224			io_uring_cqe_seen(ring, cqe);
    225		}
    226	}
    227
    228	/* wait out pending writes */
    229	while (writes) {
    230		struct io_data *data;
    231
    232		ret = io_uring_wait_cqe(ring, &cqe);
    233		if (ret) {
    234			fprintf(stderr, "wait_cqe=%d\n", ret);
    235			return 1;
    236		}
    237		if (cqe->res < 0) {
    238			fprintf(stderr, "write res=%d\n", cqe->res);
    239			return 1;
    240		}
    241		data = io_uring_cqe_get_data(cqe);
    242		free(data);
    243		writes--;
    244		io_uring_cqe_seen(ring, cqe);
    245	}
    246
    247	return 0;
    248}
    249
    250int main(int argc, char *argv[])
    251{
    252	struct io_uring ring;
    253	off_t insize;
    254	int ret;
    255
    256	if (argc < 3) {
    257		printf("%s: infile outfile\n", argv[0]);
    258		return 1;
    259	}
    260
    261	infd = open(argv[1], O_RDONLY);
    262	if (infd < 0) {
    263		perror("open infile");
    264		return 1;
    265	}
    266	outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
    267	if (outfd < 0) {
    268		perror("open outfile");
    269		return 1;
    270	}
    271
    272	if (setup_context(QD, &ring))
    273		return 1;
    274	if (get_file_size(infd, &insize))
    275		return 1;
    276
    277	ret = copy_file(&ring, insize);
    278
    279	close(infd);
    280	close(outfd);
    281	io_uring_queue_exit(&ring);
    282	return ret;
    283}