dispatcher.c (3716B)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2019 Intel Corporation. */

#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>

/* The BPF dispatcher is a multiway branch code generator. The
 * dispatcher is a mechanism to avoid the performance penalty of an
 * indirect call, which is expensive when retpolines are enabled. A
 * dispatch client registers a BPF program into the dispatcher, and if
 * there is available room in the dispatcher a direct call to the BPF
 * program will be generated. All calls to the BPF programs called via
 * the dispatcher will then be a direct call, instead of an
 * indirect. The dispatcher hijacks a trampoline function via the
 * __fentry__ of the trampoline. The trampoline function has the
 * following signature:
 *
 * unsigned int trampoline(const void *ctx, const struct bpf_insn *insnsi,
 *                         unsigned int (*bpf_func)(const void *,
 *                                                  const struct bpf_insn *));
 */

static struct bpf_dispatcher_prog *bpf_dispatcher_find_prog(
	struct bpf_dispatcher *d, struct bpf_prog *prog)
{
	int i;

	for (i = 0; i < BPF_DISPATCHER_MAX; i++) {
		if (prog == d->progs[i].prog)
			return &d->progs[i];
	}
	return NULL;
}

static struct bpf_dispatcher_prog *bpf_dispatcher_find_free(
	struct bpf_dispatcher *d)
{
	return bpf_dispatcher_find_prog(d, NULL);
}

static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d,
				    struct bpf_prog *prog)
{
	struct bpf_dispatcher_prog *entry;

	if (!prog)
		return false;

	entry = bpf_dispatcher_find_prog(d, prog);
	if (entry) {
		refcount_inc(&entry->users);
		return false;
	}

	entry = bpf_dispatcher_find_free(d);
	if (!entry)
		return false;

	bpf_prog_inc(prog);
	entry->prog = prog;
	refcount_set(&entry->users, 1);
	d->num_progs++;
	return true;
}

static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,
				       struct bpf_prog *prog)
{
	struct bpf_dispatcher_prog *entry;

	if (!prog)
		return false;

	entry = bpf_dispatcher_find_prog(d, prog);
	if (!entry)
		return false;

	if (refcount_dec_and_test(&entry->users)) {
		entry->prog = NULL;
		bpf_prog_put(prog);
		d->num_progs--;
		return true;
	}
	return false;
}

int __weak arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	return -ENOTSUPP;
}

static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image)
{
	s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
	int i;

	for (i = 0; i < BPF_DISPATCHER_MAX; i++) {
		if (d->progs[i].prog)
			*ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func;
	}
	return arch_prepare_bpf_dispatcher(image, &ips[0], d->num_progs);
}

static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
{
	void *old, *new;
	u32 noff;
	int err;

	if (!prev_num_progs) {
		old = NULL;
		noff = 0;
	} else {
		old = d->image + d->image_off;
		noff = d->image_off ^ (PAGE_SIZE / 2);
	}

	new = d->num_progs ? d->image + noff : NULL;
	if (new) {
		if (bpf_dispatcher_prepare(d, new))
			return;
	}

	err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
	if (err || !new)
		return;

	d->image_off = noff;
}

void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to)
{
	bool changed = false;
	int prev_num_progs;

	if (from == to)
		return;

	mutex_lock(&d->mutex);
	if (!d->image) {
		d->image = bpf_jit_alloc_exec_page();
		if (!d->image)
			goto out;
		bpf_image_ksym_add(d->image, &d->ksym);
	}

	prev_num_progs = d->num_progs;
	changed |= bpf_dispatcher_remove_prog(d, from);
	changed |= bpf_dispatcher_add_prog(d, to);

	if (!changed)
		goto out;

	bpf_dispatcher_update(d, prev_num_progs);
out:
	mutex_unlock(&d->mutex);
}
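
For context, a minimal sketch of the client side, modelled on the in-tree XDP usage in net/core/filter.c. DEFINE_BPF_DISPATCHER() and BPF_DISPATCHER_PTR() are the helper macros from <linux/bpf.h> that declare the per-client dispatcher object and its trampoline; the subsystem name "my_subsys" and the wrapper my_subsys_change_prog() are hypothetical names used only for illustration.

/* Illustrative sketch of a dispatch client; not part of dispatcher.c. */
#include <linux/bpf.h>
#include <linux/filter.h>

/* Declares a struct bpf_dispatcher plus the trampoline function whose
 * __fentry__ site the dispatcher patches (hypothetical client name).
 */
DEFINE_BPF_DISPATCHER(my_subsys)

/* Attach/detach/swap the program that gets a direct-call slot:
 * @from is the previously attached program (or NULL), @to the new one
 * (or NULL to detach).
 */
static void my_subsys_change_prog(struct bpf_prog *from, struct bpf_prog *to)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_subsys), from, to);
}

On the first change, the mutex-protected path above allocates d->image, generates the multiway branch code via arch_prepare_bpf_dispatcher(), and pokes the trampoline's __fentry__ to jump into it. Later changes regenerate the branch code in the unused half of the image page (d->image_off ^ PAGE_SIZE/2) before moving the jump, so concurrent callers never execute a half-written image.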