map_perf_test_kern.c (6676B)
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "trace_common.h"

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");

struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");

SEC("kprobe/" SYSCALL(sys_getuid))
int stress_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value)
		bpf_map_delete_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_geteuid))
int stress_percpu_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_getgid))
int stress_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&hash_map_alloc, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_getegid))
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_connect))
int stress_lru_hmap_alloc(struct pt_regs *ctx)
{
	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	union {
		u16 dst6[8];
		struct {
			u16 magic0;
			u16 magic1;
			u16 tcase;
			u16 unused16;
			u32 unused32;
			u32 key;
		};
	} test_params;
	struct sockaddr_in6 *in6;
	u16 test_case;
	int addrlen, ret;
	long val = 1;
	u32 key = 0;

	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
	addrlen = (int)PT_REGS_PARM3_CORE(real_regs);

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
				  &in6->sin6_addr);
	if (ret)
		goto done;

	if (test_params.magic0 != 0xdead ||
	    test_params.magic1 != 0xbeef)
		return 0;

	test_case = test_params.tcase;
	if (test_case != 3)
		key = bpf_get_prandom_u32();

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 3) {
		u32 i;

		key = test_params.key;

#pragma clang loop unroll(full)
		for (i = 0; i < 32; i++) {
			bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
			key++;
		}
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_gettid))
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;
	key.b8[4] = 192;
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_getpgid))
int stress_hash_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_getppid))
int stress_array_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&array_map, &key);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
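
The kprobe programs above only run when the hooked syscalls are actually invoked, so the object has to be loaded, attached, and then driven from user space by issuing those syscalls in a loop. Below is a minimal sketch of such a driver, not the in-tree samples/bpf loader: it assumes libbpf is available, that the object was compiled to a hypothetical map_perf_test_kern.o, and it exercises only the getuid() path that triggers stress_hmap.

/* Hypothetical minimal loader sketch (assumes libbpf and an object file
 * named map_perf_test_kern.o); the actual samples/bpf user-space program
 * is more elaborate and times each stress test. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int i;

	obj = bpf_object__open_file("map_perf_test_kern.o", NULL);
	if (libbpf_get_error(obj))
		return 1;
	if (bpf_object__load(obj))
		return 1;

	/* Attach every SEC("kprobe/...") program defined in the object. */
	bpf_object__for_each_program(prog, obj) {
		if (libbpf_get_error(bpf_program__attach(prog)))
			fprintf(stderr, "failed to attach %s\n",
				bpf_program__name(prog));
	}

	/* Each getuid() syscall now runs stress_hmap once. */
	for (i = 0; i < 1000000; i++)
		syscall(__NR_getuid);

	return 0;
}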