cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit d8a20a54b05e2c9adb5198b1439608391450dd0b
parent 576e8dc70825a04af3ac9890491d348959bf19ce
Author: Louis Burda <quent.burda@gmail.com>
Date:   Mon,  8 Aug 2022 19:21:29 +0200

Added ioctl interface and debugged single access evictions

Diffstat:
M .gitignore | 1 +
M Makefile | 3 +++
A access.c | 29 +++++++++++++++++++++++++++++
A access.sh | 8 ++++++++
M patch.diff | 141 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
M src/asm.h | 26 +++++++++-----------------
M src/cache_types.h | 2 --
M src/cachepc.c | 27 +++++++++++++++++++++++----
M src/cachepc.h | 71 ++++++++++++++++++++++-------------------------------------------
A src/cachepc_user.h | 6 ++++++
M src/util.c | 19 +++++++++++++++++--
11 files changed, 243 insertions(+), 90 deletions(-)

diff --git a/.gitignore b/.gitignore
@@ -4,3 +4,4 @@ push.sh
 *.o
 read
 .vscode
+access
diff --git a/Makefile b/Makefile
@@ -30,6 +30,9 @@ load:
 read: read.c
 	$(CC) -o $@ $<
 
+access: access.c src/cachepc_user.h
+	$(CC) -o $@ $< -I src
+
 test: load read
 	@./read
diff --git a/access.c b/access.c
@@ -0,0 +1,29 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <err.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stropts.h>
+
+#include "cachepc_user.h"
+
+int
+main(int argc, const char **argv)
+{
+	size_t i, len;
+	int fd, ret;
+	int count;
+
+	fd = open("/proc/cachepc", O_RDONLY);
+	if (fd < 0) err(1, "open");
+
+	for (i = 0; i < 50; i++) {
+		ret = ioctl(fd, CACHEPC_IOCTL_ACCESS_TEST, &count);
+		if (ret == -1) err(1, "ioctl fail");
+		printf("%i\n", count);
+	}
+
+	close(fd);
+}
diff --git a/access.sh b/access.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+for i in $(seq 0 100); do
+	echo -n "\rRun $i"
+	bash build.sh load 1>/dev/null
+done
+echo ""
+dmesg -k | grep "CachePC:" | grep "access test" | tail -n100
diff --git a/patch.diff b/patch.diff
@@ -1,7 +1,15 @@
 diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
-index b804444e16d4..c94f8c4460f1 100644
+index b804444e16d4..17167ccfca22 100644
 --- a/arch/x86/kvm/Makefile
 +++ b/arch/x86/kvm/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-ccflags-y += -Iarch/x86/kvm
++ccflags-y += -Iarch/x86/kvm -O2
+ ccflags-$(CONFIG_KVM_WERROR) += -Werror
+ 
+ ifeq ($(CONFIG_FRAME_POINTER),y)
 @@ -10,7 +10,9 @@ endif
 KVM := ../../../virt/kvm
@@ -24,7 +32,7 @@ index b804444e16d4..c94f8c4460f1 100644
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 7b3cfbe8f7e3..f9a6b37eb36a 100644
+index 7b3cfbe8f7e3..71697d08e9e4 100644
 --- a/arch/x86/kvm/svm/svm.c
 +++ b/arch/x86/kvm/svm/svm.c
 @@ -2,6 +2,8 @@
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
-@@ -3785,8 +3787,19 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3785,8 +3787,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 +	struct vcpu_svm *svm;
 +
 +	printk(KERN_WARNING "CachePC: svm_cpu_enter_exit()\n");
-+	printk(KERN_WARNING "Vincent CachePC: svm_cpu_enter_exit()\n");
-+	cachepc_init_counters();
++
 +	if (!ctx) ctx = cachepc_get_ctx(L1);
 +	if (!ds) ds = cachepc_prepare_ds(ctx);
 +
 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
-@@ -3835,8 +3848,15 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -3835,8 +3847,14 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 */
 	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 +	cachepc_probe(head);
 +	//cachepc_print_msrmts(head);
-+	printk(KERN_WARNING "Vincent: Saving measurements\n");
 +	cachepc_save_msrmts(head);
 +
 	/*
 	 * We do not use IBRS in the kernel. If this vCPU has used the
 	 * SPEC_CTRL MSR it may have left it on; save the value and
-@@ -3912,6 +3932,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -3912,6 +3930,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu))
 		return EXIT_FASTPATH_NONE;
 
 +
 }
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 2541a17ff1c4..1c3c3b63baba 100644
+index 2541a17ff1c4..116ca17af03a 100644
 --- a/virt/kvm/kvm_main.c
 +++ b/virt/kvm/kvm_main.c
 @@ -51,6 +51,9 @@
 __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
-@@ -4765,12 +4782,95 @@ static void check_processor_compat(void *data)
+@@ -4765,12 +4782,197 @@ static void check_processor_compat(void *data)
 	*c->ret = kvm_arch_check_processor_compat(c->opaque);
 }
 +}
 +
 +void
++kvm_cachepc_single_access_test(void *p)
++{
++	cacheline *ptr;
++	uint64_t pre, post;
++	volatile register uint64_t i asm("r11");
++	int *cnt;
++
++	cnt = p;
++
++	ptr = cachepc_prepare_victim(cachepc_ctx, 48);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	cachepc_prime(cachepc_ds);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	for (i = 0; i < 100000000LLU; i++);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	pre = cachepc_readpmc(0);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	pre += cachepc_readpmc(1);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	pre += cachepc_readpmc(2);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	cachepc_victim(ptr);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	for (i = 0; i < 100000000LLU; i++);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	post = cachepc_readpmc(0);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	post += cachepc_readpmc(1);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	post += cachepc_readpmc(2);
++
++	cachepc_mfence();
++	cachepc_cpuid();
++
++	printk(KERN_WARNING "CachePC: Single access test done, result: %llu", post - pre);
++
++	if (cnt) *cnt = post - pre;
++
++	cachepc_release_victim(cachepc_ctx, ptr);
++}
++
++void
 +kvm_cachepc_single_eviction_test(void *p)
 +{
 +	cacheline *head;
 +
 +	ptr = cachepc_prepare_victim(cachepc_ctx, 48);
 +	head = cachepc_prime(cachepc_ds);
-+	cachepc_victim(ptr);
++	//cachepc_victim(ptr);
 +	cachepc_probe(head);
 +
-+	printk(KERN_WARNING "CachePC: Test done, results:");
-+	cachepc_print_msrmts(head);
++	printk(KERN_WARNING "CachePC: Single eviction test done\n");
++	//cachepc_print_msrmts(head);
 +	cachepc_save_msrmts(head);
 +
 +	cachepc_release_victim(cachepc_ctx, ptr);
 +}
 +{
 +	int cpu;
 +
++	local_irq_disable();
++
 +	cpu = get_cpu();
 +
 +	printk(KERN_WARNING "CachePC: Running on core %i\n", cpu);
 +	cachepc_ctx = cachepc_get_ctx(L1);
 +	cachepc_ds = cachepc_prepare_ds(cachepc_ctx);
 +
++	kvm_cachepc_single_access_test(p);
 +	kvm_cachepc_single_eviction_test(p);
 +
 +	put_cpu();
++
++	local_irq_enable();
++}
++
++long
++kvm_cachepc_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
++{
++	int r;
++	void __user *arg_user;
++	int cnt;
++
++	arg_user = (void __user *)argp;
++	switch (cmd) {
++	case CACHEPC_IOCTL_ACCESS_TEST:
++		printk(KERN_WARNING "CachePC: ioctl access test\n");
++		r = smp_call_function_single(2,
++			kvm_cachepc_single_access_test, &cnt, true);
++		WARN_ON(r != 0);
++		if (arg_user) {
++			if (copy_to_user(arg_user, &cnt, sizeof(int)))
++				return -EFAULT;
++		}
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return 0;
 +}
 +
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	  struct module *module)
 {
-+	printk(KERN_WARNING "Vincent: KVM Init called\n");
 	struct kvm_cpu_compat_check c;
 -	int r;
 -	int cpu;
 	r = kvm_arch_init(opaque);
 	if (r)
-@@ -4848,6 +4948,20 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -4848,6 +5050,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
 +	cachepc_proc_ops.proc_read = kvm_cachepc_read;
 +	cachepc_proc_ops.proc_write = kvm_cachepc_write;
 +	cachepc_proc_ops.proc_release = kvm_cachepc_close;
++	cachepc_proc_ops.proc_ioctl = kvm_cachepc_ioctl;
 +	proc_create("cachepc", 0644, NULL, &cachepc_proc_ops);
 +
 	return 0;
 
 out_unreg:
-@@ -4872,6 +4986,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
+@@ -4872,6 +5089,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
diff --git a/src/asm.h b/src/asm.h
@@ -22,21 +22,20 @@ static inline void cachepc_mfence(void);
 __attribute__((always_inline))
 static inline void cachepc_readq(void *p);
 
-__attribute__((always_inline))
-static inline void cachepc_victim(void *p);
-
 uint64_t
 cachepc_readpmc(uint64_t event)
 {
 	uint32_t lo, hi;
 
+	event = 0xC0010201 + 2 * event;
+
 	asm volatile (
 		"rdmsr"
 		: "=a" (lo), "=d" (hi)
 		: "c"(event)
 	);
-	return ((uint64_t) hi << 32) | (uint64_t)lo;
+	return ((uint64_t) hi << 32) | (uint64_t) lo;
 }
 
 void
@@ -54,29 +53,29 @@ cachepc_lfence(void)
 {
 	asm volatile(
 		"lfence\n\t"
-		::
+		::: "memory"
 	);
 }
 
-inline void
+void
cachepc_sfence(void)
 {
 	asm volatile(
 		"sfence\n\t"
-		::
+		::: "memory"
 	);
 }
 
-inline void
+void
cachepc_mfence(void)
 {
 	asm volatile(
 		"mfence\n\t"
-		::
+		::: "memory"
 	);
 }
 
-inline void
+void
cachepc_readq(void *p)
 {
 	asm volatile (
@@ -84,10 +83,3 @@ cachepc_readq(void *p)
 		: : "r" (p) : "r10"
 	);
 }
-
-inline void
-cachepc_victim(void *p)
-{
-	cachepc_mfence();
-	cachepc_readq(p);
-}
diff --git a/src/cache_types.h b/src/cache_types.h
@@ -2,8 +2,6 @@
 
 #include "device_conf.h"
 
-#include <linux/build_bug.h>
-
 #define SET_MASK(SETS) (((((uintptr_t) SETS) * CACHELINE_SIZE) - 1) ^ (CACHELINE_SIZE - 1))
 
 #define REMOVE_PAGE_OFFSET(ptr) ((void *) (((uintptr_t) ptr) & PAGE_MASK))
diff --git a/src/cachepc.c b/src/cachepc.c
@@ -1,5 +1,11 @@
 #include "cachepc.h"
 
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ioctl.h>
+
 static void cl_insert(cacheline *last_cl, cacheline *new_cl);
 static void *remove_cache_set(cache_ctx *ctx, void *ptr);
 static void *remove_cache_group_set(void *ptr);
@@ -26,8 +32,8 @@ cachepc_init_counters(void)
 	 */
 
 	reg_addr = 0xc0010200;
-	event_no = 0x64;//0x29;//0x64;
-	event_mask = 0x08; //0x07; //0x08;
+	event_no = 0x70;
+	event_mask = 0xFF;
 	event = event_no | (event_mask << 8);
 	event |= (1ULL << 17); /* OS (kernel) events only */
 	event |= (1ULL << 22); /* enable performance counter */
@@ -36,14 +42,25 @@ cachepc_init_counters(void)
 	asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
 
 	reg_addr = 0xc0010202;
-	event_no = 0x64;
-	event_mask = 0xF0;
+	event_no = 0x71;
+	event_mask = 0xFF;
+	event = event_no | (event_mask << 8);
+	event |= (1ULL << 17); /* OS (kernel) events only */
+	event |= (1ULL << 22); /* enable performance counter */
+	event |= (1ULL << 40); /* Host events only */
+	printk(KERN_WARNING "CachePC: Initialized event %llu\n", event);
+	asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
+
+	reg_addr = 0xc0010204;
+	event_no = 0x72;
+	event_mask = 0xFF;
 	event = event_no | (event_mask << 8);
 	event |= (1ULL << 17); /* OS (kernel) events only */
 	event |= (1ULL << 22); /* enable performance counter */
 	event |= (1ULL << 40); /* Host events only */
 	printk(KERN_WARNING "CachePC: Initialized event %llu\n", event);
 	asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
+
 }
 
 cache_ctx *
@@ -149,6 +166,8 @@ cachepc_save_msrmts(cacheline *head)
 {
 	cacheline *curr_cl;
 
+	printk(KERN_WARNING "CachePC: Updating /proc/cachepc\n");
+
 	curr_cl = head;
 	do {
 		if (IS_FIRST(curr_cl->flags)) {
diff --git a/src/cachepc.h b/src/cachepc.h
@@ -1,16 +1,9 @@
 #pragma once
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-
 #include "asm.h"
 #include "cache_types.h"
 #include "util.h"
-
-#define L2_MISS_CNTR 0xC0010201
-#define L2_HIT_CNTR 0xC0010203
+#include "cachepc_user.h"
 
 void cachepc_init_counters(void);
@@ -32,12 +25,12 @@ static inline cacheline *cachepc_prime(cacheline *head);
 __attribute__((always_inline))
 static inline cacheline *cachepc_prime_rev(cacheline *head);
 
-//__attribute__((always_inline))
-//static inline cacheline *cachepc_probe_set(cacheline *curr_cl);
-
 __attribute__((always_inline))
 static inline cacheline *cachepc_probe(cacheline *head);
 
+__attribute__((always_inline))
+static inline void cachepc_victim(void *p);
+
 extern uint16_t *cachepc_msrmts;
 extern size_t cachepc_msrmts_count;
@@ -101,23 +94,20 @@ cachepc_prime_rev(cacheline *head)
 cacheline *
 cachepc_probe(cacheline *start_cl)
 {
-	uint64_t pre1, pre2;
-	uint64_t post1, post2;
-	volatile int i = 0;
+	uint64_t pre, post;
 	cacheline *next_cl;
 	cacheline *curr_cl;
+	volatile register uint64_t i asm("r12");
 
 	curr_cl = start_cl;
 
 	do {
-		cachepc_cpuid();
+		pre = cachepc_readpmc(0);
+		pre += cachepc_readpmc(1);
+
 		cachepc_mfence();
-
-		pre1 = cachepc_readpmc(L2_HIT_CNTR);
-		pre2 = cachepc_readpmc(L2_MISS_CNTR);
-		cachepc_cpuid();
-		cachepc_mfence();
+
 		asm volatile(
 			"mov 8(%[curr_cl]), %%rax \n\t" // +8
 			"mov 8(%%rax), %%rcx \n\t" // +16
@@ -133,45 +123,28 @@ cachepc_probe(cacheline *start_cl)
 			: "rax", "rcx"
 		);
 
-		cachepc_cpuid();
 		cachepc_mfence();
+		cachepc_cpuid();
+
+		post = cachepc_readpmc(0);
+		post += cachepc_readpmc(1);
 
-		cachepc_cpuid();
 		cachepc_mfence();
-
-		//msleep(100);
-		//for(i=0; i<100000; ++i){
-		//}
-
-		post1 = cachepc_readpmc(L2_HIT_CNTR);
-		cachepc_cpuid();
-		post2 = cachepc_readpmc(L2_MISS_CNTR);
 		cachepc_cpuid();
 
 		/* works across size boundary */
-		curr_cl->count = 0;
-		curr_cl->count += post1 - pre1;
-		curr_cl->count += post2 - pre2;
+		curr_cl->count = post - pre;
+
 		curr_cl = next_cl;
 	} while (__builtin_expect(curr_cl != start_cl, 1));
 
 	return curr_cl->next;
 }
 
-// static inline cacheline *
-// cachepc_probe(cacheline *head)
-// {
-// 	cacheline *curr_cs;
-//
-// 	//printk(KERN_WARNING "CachePC: Probing..");
-//
-// 	curr_cs = head;
-// 	do {
-// 		curr_cs = cachepc_probe_set(curr_cs);
-// 	} while (__builtin_expect(curr_cs != head, 1));
-//
-// 	//printk(KERN_WARNING "CachePC: Probing done");
-//
-// 	return curr_cs->next;
-// }
+void
+cachepc_victim(void *p)
+{
+	cachepc_cpuid();
+	cachepc_mfence();
+	cachepc_readq(p);
+}
diff --git a/src/cachepc_user.h b/src/cachepc_user.h
@@ -0,0 +1,6 @@
+#pragma once
+
+#include <linux/ioctl.h>
+
+#define CACHEPC_IOCTL_MAGIC 0xBF
+#define CACHEPC_IOCTL_ACCESS_TEST _IOR(CACHEPC_IOCTL_MAGIC, 0, int)
diff --git a/src/util.c b/src/util.c
@@ -1,6 +1,21 @@
 #include "util.h"
 
-#include <linux/random.h>
+static size_t random_pos = 0;
+static uint8_t random[] = { 90, 227, 179, 229, 27, 117, 69, 81, 188, 253, 129, 140, 140, 180, 191, 152, 194, 98, 169, 205, 254, 155, 249, 81, 208, 245, 186, 80, 81, 50, 63, 67, 200, 108, 70, 32, 239, 158, 38, 234, 183, 130, 141, 175, 39, 230, 107, 199, 59, 43, 238, 122, 103, 25, 184, 66, 31, 239, 57, 92, 119, 101, 147, 188, 171, 112, 209, 227, 92, 224, 9, 150, 220, 10, 154, 92, 86, 39, 154, 140, 65, 57, 158, 47, 142, 168, 222, 200, 69, 183, 160, 249, 103, 45, 241, 112, 49, 85, 2, 73, 255, 16, 132, 215, 190, 143, 215, 128, 119, 75, 136, 112, 67, 27, 213, 78, 127, 1, 197, 18, 122, 216, 123, 244, 11, 154, 124, 212, 171, 29, 184, 45, 42, 128, 124, 168, 112, 191, 139, 136, 20, 127, 169, 75, 220, 4, 162, 207, 80, 147, 25, 39, 232, 219, 100, 13, 199, 88, 19, 40, 141, 2, 16, 109, 40, 127, 47, 60, 221, 151, 156, 115, 182, 198, 231, 193, 36, 89, 127, 31, 187, 47, 109, 70, 75, 115, 221, 236, 46, 65, 151, 48, 185, 157, 177, 152, 134, 38, 246, 146, 15, 67, 80, 192, 74, 244, 250, 194, 21, 19, 151, 199, 124, 9, 174, 171, 239, 146, 213, 214, 226, 137, 237, 13, 92, 87, 10, 144, 21, 143, 158, 130, 129, 176, 40, 25, 247, 182, 90, 226, 14, 199, 219, 242, 52, 225, 154, 218, 242, 191, 53, 253, 36, 62, 154, 13, 145, 182, 72, 234, 140, 166, 125, 93, 236, 14, 40, 183, 48, 138, 240, 243, 100, 119, 160, 73, 182, 204, 130, 108, 80, 226, 13, 36, 118, 245, 85, 205, 131, 110, 69, 116, 130, 211, 243, 182, 180, 28, 197, 224, 245, 78, 122, 135, 194, 31, 138, 178, 194, 150, 42, 190, 7, 217, 100, 19, 161, 154, 237, 76, 135, 63, 2, 33, 229, 164, 223, 175, 0, 51, 177, 78, 13, 241, 198, 152, 109, 166, 92, 226, 42, 213, 148, 149, 144, 39, 20, 51, 239, 153, 56, 198, 190, 165, 243, 108, 66, 132, 127, 179, 182, 211, 207, 107, 223, 188, 198, 103, 147, 127, 87, 187, 137, 123, 72, 141, 156, 28, 76, 234, 244, 108, 176, 227, 221, 26, 110, 81, 28, 187, 14, 24, 82, 218, 201, 156, 20, 184, 105, 117, 188, 132, 243, 11, 13, 188, 243, 181, 98, 136, 124, 152, 254, 228, 221, 114, 140, 103, 44, 55, 147, 227, 241, 96, 198, 27, 98, 35, 179, 6, 244, 17, 152, 128, 44, 75, 8, 18, 122, 79, 244, 210, 8, 168, 99, 80, 19, 100, 38, 6, 243, 216, 200, 105, 164, 29, 171, 232, 247, 218, 17, 133, 232, 68, 140, 100, 106, 49, 17, 90, 178, 38, 69, 238, 23, 174, 180, 90, 18, 12, 71, 45, 101, 200, 83, 77, 95, 218, 91, 176, 63, 179, 203, 125, 56, 171, 218, 98, 135, 127, 214, 63, 41, 151, 197, 157, 192, 152, 67, 67, 157, 54, 123, 111, 118, 45, 94, 15, 81, 123, 125, 169, 67, 50, 150, 113, 147, 13, 16, 86, 2, 135, 129, 88, 154, 246, 170, 223, 47, 247, 190, 187, 35, 213, 194, 67, 226, 181, 208, 135, 75, 30, 233, 136, 45, 222, 121, 60, 157, 48, 171, 244, 52, 40, 187, 8, 23, 173, 41, 157, 165, 158, 92, 139, 22, 95, 72, 164, 142, 213, 156, 102, 196, 108, 228, 203, 99, 72, 254, 173, 37, 212, 150, 145, 104, 76, 117, 242, 185, 180, 108, 50, 188, 206, 40, 52, 55, 147, 240, 89, 248, 203, 110, 237, 24, 88, 63, 99, 224, 121, 229, 90, 253, 12, 72, 24, 3, 247, 127, 35, 178, 198, 80, 151, 223, 243, 195, 114, 5, 134, 250, 85, 182, 154, 206, 41, 53, 50, 59, 174, 117, 203, 200, 33, 182, 230, 147, 101, 36, 111, 23, 187, 130, 16, 211, 90, 102, 207, 154, 140, 123, 212, 66, 45, 35, 165, 139, 109, 169, 226, 210, 115, 16, 92, 196, 31, 245, 154, 110, 181, 161, 126, 184, 177, 237, 125, 181, 71, 120, 86, 222, 179, 133, 113, 72, 206, 157, 89, 162, 80, 164, 223, 38, 17, 238, 114, 188, 125, 69, 1, 28, 126, 249, 180, 189, 144, 215, 152, 89, 92, 62, 98, 151, 242, 46, 48, 162, 3, 95, 211, 122, 217, 36, 235, 109, 100, 94, 233, 173, 150, 71, 125, 201, 168, 4, 180, 248, 249, 240, 50, 206, 242, 169, 201, 31, 137, 198, 93, 241, 219, 11, 9, 1, 229, 249, 194, 67, 41, 143, 117, 103, 238, 247, 72, 178, 21, 193, 146, 119, 159, 21, 253, 206, 66, 186, 60, 200, 102, 179, 117, 103, 32, 0, 116, 31, 133, 129, 127, 38, 5, 177, 195, 25, 23, 86, 29, 222, 53, 0, 140, 179, 118, 239, 141, 237, 122, 80, 200, 92, 47, 15, 58, 167, 26, 37, 146, 254, 3, 74, 148, 159, 221, 38, 68, 110, 98, 82, 16, 171, 232, 139, 72, 87, 114, 113, 61, 210, 82, 180, 196, 8, 14, 249, 185, 159, 253, 166, 200, 82, 176, 112, 173, 246, 40, 22, 202, 140, 76, 60, 92, 225, 10, 198, 41, 26, 223, 250, 181, 135, 196, 230, 10, 103, 197, 128, 155, 148, 121, 150, 51, 196, 143, 183, 153, 229, 93, 118, 12, 235, 237, 105, 73, 27, 24, 86, 248, 39, 190, 71, 184, 212, 74, 196, 181, 46, 140, 9, 18, 168, 110, 30, 93, 166, 44, 153, 88, 82, 148, 237, 146, 173, 158, 29, 215, 202, 3, 224, 240, 186, 202, 52, 123, 244, 226, 109, 79, 174, 245, 35, 242, 82, 187, 101, 69, 245, 104, 139, 118, 134, 236, 135, 243, 10, 149, 162, 212, 245, 132, 3, 90, 38, 96, 28, 98, 200, 80, 141, 252, 40, 214, 80, 152, 221, 239, 166, 135, 104, 105, 227, 248, 102, 53, 78, 186, 95, 15, 97, 58, 129, 98, 219, 233, 167, 89, 198, 175, 98, 77, 20, 182, 112, 104, 165, 34 };
+
+void
+prng_bytes(uint8_t *dst, size_t size)
+{
+	size_t i;
+
+	if (random_pos + size > sizeof(random))
+		random_pos = 0;
+
+	for (i = 0; i < size; i++)
+		dst[i] = random[random_pos + i];
+
+	random_pos += size;
+}
 
 void
 random_perm(uint32_t *arr, uint32_t arr_len)
@@ -8,7 +23,7 @@ random_perm(uint32_t *arr, uint32_t arr_len)
 	uint32_t i, idx, tmp;
 
 	for (i = arr_len - 1; i > 0; --i) {
-		get_random_bytes(&idx, 4);
+		prng_bytes((void*)&idx, 4);
 		idx = idx % i;
 
 		tmp = arr[idx];
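
Note (editor's sketch, not part of the repository or this commit): the patch programs the AMD core performance counters directly through MSRs. Each counter i has an event-select register PERF_CTL at 0xC0010200 + 2*i and a counter register PERF_CTR at 0xC0010201 + 2*i, which is why cachepc_readpmc() computes 0xC0010201 + 2 * event and cachepc_init_counters() writes 0xc0010200, 0xc0010202 and 0xc0010204. The small userspace program below only illustrates that MSR layout via the Linux msr driver; it assumes root, a loaded msr module and an AMD Zen host, and the event numbers 0x70-0x72 and flag bits simply mirror the values used in the patch rather than being a recommendation.

/* pmc_msr_demo.c -- illustrative only, not part of this repository.
 * Programs PERF_CTL0..2 and reads PERF_CTR0 through /dev/cpu/0/msr,
 * mirroring the event/flag values that cachepc_init_counters() uses.
 * Build: cc -o pmc_msr_demo pmc_msr_demo.c  (run as root, after `modprobe msr`) */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

#define PERF_CTL(i) (0xC0010200u + 2 * (i)) /* event select for counter i */
#define PERF_CTR(i) (0xC0010201u + 2 * (i)) /* counter value for counter i */

static uint64_t
rdmsr_fd(int fd, uint32_t msr)
{
	uint64_t val;

	/* the msr driver maps the MSR number to the file offset */
	if (pread(fd, &val, sizeof(val), msr) != sizeof(val))
		err(1, "rdmsr 0x%x", msr);
	return val;
}

static void
wrmsr_fd(int fd, uint32_t msr, uint64_t val)
{
	if (pwrite(fd, &val, sizeof(val), msr) != sizeof(val))
		err(1, "wrmsr 0x%x", msr);
}

int
main(void)
{
	uint64_t event, before, after;
	int fd, i;

	fd = open("/dev/cpu/0/msr", O_RDWR);
	if (fd < 0) err(1, "open /dev/cpu/0/msr");

	for (i = 0; i < 3; i++) {
		event  = (0x70 + i) | (0xFFULL << 8); /* event number + unit mask, as in the patch */
		event |= 1ULL << 17;                  /* count kernel (OS) mode only */
		event |= 1ULL << 22;                  /* enable the counter */
		event |= 1ULL << 40;                  /* "host events only" per the patch comment */
		wrmsr_fd(fd, PERF_CTL(i), event);
	}

	before = rdmsr_fd(fd, PERF_CTR(0));
	sleep(1);
	after = rdmsr_fd(fd, PERF_CTR(0));
	printf("PERF_CTR0 delta over ~1s: %llu\n",
	       (unsigned long long)(after - before));

	close(fd);
	return 0;
}

access.c above exercises the same counters indirectly through the new CACHEPC_IOCTL_ACCESS_TEST ioctl on /proc/cachepc instead of touching the MSRs itself.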