xlated_dumper.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>

#include "disasm.h"
#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"

static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
{
	return ((struct kernel_sym *)sym_a)->address -
	       ((struct kernel_sym *)sym_b)->address;
}

void kernel_syms_load(struct dump_data *dd)
{
	struct kernel_sym *sym;
	char buff[256];
	void *tmp, *address;
	FILE *fp;

	fp = fopen("/proc/kallsyms", "r");
	if (!fp)
		return;

	while (fgets(buff, sizeof(buff), fp)) {
		tmp = libbpf_reallocarray(dd->sym_mapping, dd->sym_count + 1,
					  sizeof(*dd->sym_mapping));
		if (!tmp) {
out:
			free(dd->sym_mapping);
			dd->sym_mapping = NULL;
			fclose(fp);
			return;
		}
		dd->sym_mapping = tmp;
		sym = &dd->sym_mapping[dd->sym_count];
		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
			continue;
		sym->address = (unsigned long)address;
		if (!strcmp(sym->name, "__bpf_call_base")) {
			dd->address_call_base = sym->address;
			/* sysctl kernel.kptr_restrict was set */
			if (!sym->address)
				goto out;
		}
		if (sym->address)
			dd->sym_count++;
	}

	fclose(fp);

	qsort(dd->sym_mapping, dd->sym_count,
	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
}

void kernel_syms_destroy(struct dump_data *dd)
{
	free(dd->sym_mapping);
}

struct kernel_sym *kernel_syms_search(struct dump_data *dd,
				      unsigned long key)
{
	struct kernel_sym sym = {
		.address = key,
	};

	return dd->sym_mapping ?
	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}

static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

static void __printf(2, 3)
print_insn_for_graph(void *private_data, const char *fmt, ...)
{
	char buf[64], *p;
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	p = buf;
	while (*p != '\0') {
		if (*p == '\n') {
			memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
			/* Align each instruction dump row left. */
			*p++ = '\\';
			*p++ = 'l';
			/* Output multiline concatenation. */
			*p++ = '\\';
		} else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
			memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
			/* Escape special character. */
			*p++ = '\\';
		}

		p++;
	}

	printf("%s", buf);
}
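
/* JSON variant of the print callback: chop the trailing newline that the
 * disassembler appends to each line, then emit the instruction text as a
 * quoted JSON string via the shared json_writer.
 */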
static void __printf(2, 3)
print_insn_json(void *private_data, const char *fmt, ...)
{
	unsigned int l = strlen(fmt);
	char chomped_fmt[l];
	va_list args;

	va_start(args, fmt);
	if (l > 0) {
		strncpy(chomped_fmt, fmt, l - 1);
		chomped_fmt[l - 1] = '\0';
	}
	jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
	va_end(args);
}

static const char *print_call_pcrel(struct dump_data *dd,
				    struct kernel_sym *sym,
				    unsigned long address,
				    const struct bpf_insn *insn)
{
	if (!dd->nr_jited_ksyms)
		/* Do not show address for interpreted programs */
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%+d", insn->off);
	else if (sym)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%+d#%s", insn->off, sym->name);
	else
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%+d#0x%lx", insn->off, address);
	return dd->scratch_buff;
}

static const char *print_call_helper(struct dump_data *dd,
				     struct kernel_sym *sym,
				     unsigned long address)
{
	if (sym)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "%s", sym->name);
	else
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "0x%lx", address);
	return dd->scratch_buff;
}

static const char *print_call(void *private_data,
			      const struct bpf_insn *insn)
{
	struct dump_data *dd = private_data;
	unsigned long address = dd->address_call_base + insn->imm;
	struct kernel_sym *sym;

	if (insn->src_reg == BPF_PSEUDO_CALL &&
	    (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
		address = dd->jited_ksyms[insn->imm];

	sym = kernel_syms_search(dd, address);
	if (insn->src_reg == BPF_PSEUDO_CALL)
		return print_call_pcrel(dd, sym, address, insn);
	else
		return print_call_helper(dd, sym, address);
}

static const char *print_imm(void *private_data,
			     const struct bpf_insn *insn,
			     __u64 full_imm)
{
	struct dump_data *dd = private_data;

	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "map[id:%u]", insn->imm);
	else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
	else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
	else if (insn->src_reg == BPF_PSEUDO_FUNC)
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "subprog[%+d]", insn->imm);
	else
		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
			 "0x%llx", (unsigned long long)full_imm);
	return dd->scratch_buff;
}
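
/* Emit the translated program as a JSON array with one object per
 * instruction: an optional BTF-derived "proto" signature, source line info,
 * the "disasm" text, and a breakdown of the raw opcode bytes when requested
 * via the opcodes and linum parameters.
 */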
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
		      bool opcodes, bool linum)
{
	const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn_json,
		.cb_call = print_call,
		.cb_imm = print_imm,
		.private_data = dd,
	};
	struct bpf_func_info *record;
	struct bpf_insn *insn = buf;
	struct btf *btf = dd->btf;
	bool double_insn = false;
	unsigned int nr_skip = 0;
	char func_sig[1024];
	unsigned int i;

	jsonw_start_array(json_wtr);
	record = dd->func_info;
	for (i = 0; i < len / sizeof(*insn); i++) {
		if (double_insn) {
			double_insn = false;
			continue;
		}
		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);

		jsonw_start_object(json_wtr);

		if (btf && record) {
			if (record->insn_off == i) {
				btf_dumper_type_only(btf, record->type_id,
						     func_sig,
						     sizeof(func_sig));
				if (func_sig[0] != '\0') {
					jsonw_name(json_wtr, "proto");
					jsonw_string(json_wtr, func_sig);
				}
				record = (void *)record + dd->finfo_rec_size;
			}
		}

		if (prog_linfo) {
			const struct bpf_line_info *linfo;

			linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
			if (linfo) {
				btf_dump_linfo_json(btf, linfo, linum);
				nr_skip++;
			}
		}

		jsonw_name(json_wtr, "disasm");
		print_bpf_insn(&cbs, insn + i, true);

		if (opcodes) {
			jsonw_name(json_wtr, "opcodes");
			jsonw_start_object(json_wtr);

			jsonw_name(json_wtr, "code");
			jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);

			jsonw_name(json_wtr, "src_reg");
			jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);

			jsonw_name(json_wtr, "dst_reg");
			jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);

			jsonw_name(json_wtr, "off");
			print_hex_data_json((uint8_t *)(&insn[i].off), 2);

			jsonw_name(json_wtr, "imm");
			if (double_insn && i < len - 1)
				print_hex_data_json((uint8_t *)(&insn[i].imm),
						    12);
			else
				print_hex_data_json((uint8_t *)(&insn[i].imm),
						    4);
			jsonw_end_object(json_wtr);
		}
		jsonw_end_object(json_wtr);
	}
	jsonw_end_array(json_wtr);
}

void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
		       bool opcodes, bool linum)
{
	const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn,
		.cb_call = print_call,
		.cb_imm = print_imm,
		.private_data = dd,
	};
	struct bpf_func_info *record;
	struct bpf_insn *insn = buf;
	struct btf *btf = dd->btf;
	unsigned int nr_skip = 0;
	bool double_insn = false;
	char func_sig[1024];
	unsigned int i;

	record = dd->func_info;
	for (i = 0; i < len / sizeof(*insn); i++) {
		if (double_insn) {
			double_insn = false;
			continue;
		}

		if (btf && record) {
			if (record->insn_off == i) {
				btf_dumper_type_only(btf, record->type_id,
						     func_sig,
						     sizeof(func_sig));
				if (func_sig[0] != '\0')
					printf("%s:\n", func_sig);
				record = (void *)record + dd->finfo_rec_size;
			}
		}

		if (prog_linfo) {
			const struct bpf_line_info *linfo;

			linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
			if (linfo) {
				btf_dump_linfo_plain(btf, linfo, "; ",
						     linum);
				nr_skip++;
			}
		}

		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);

		printf("% 4d: ", i);
		print_bpf_insn(&cbs, insn + i, true);

		if (opcodes) {
			printf(" ");
			fprint_hex(stdout, insn + i, 8, " ");
			if (double_insn && i < len - 1) {
				printf(" ");
				fprint_hex(stdout, insn + i + 1, 8, " ");
			}
			printf("\n");
		}
	}
}

void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
			   unsigned int start_idx)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print = print_insn_for_graph,
		.cb_call = print_call,
		.cb_imm = print_imm,
		.private_data = dd,
	};
	struct bpf_insn *insn_start = buf_start;
	struct bpf_insn *insn_end = buf_end;
	struct bpf_insn *cur = insn_start;

	for (; cur <= insn_end; cur++) {
		printf("% 4d: ", (int)(cur - insn_start + start_idx));
		print_bpf_insn(&cbs, cur, true);
		if (cur != insn_end)
			printf(" | ");
	}
}
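
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * such as bpftool's prog-dump path is expected to fill in a struct dump_data
 * and then drive one of the dumpers, roughly along these lines. The info
 * fields and variable names below are placeholders, not exact bpftool code.
 *
 *	struct dump_data dd = {};
 *
 *	dd.jited_ksyms = jited_ksyms;		// subprog addresses, if provided
 *	dd.nr_jited_ksyms = nr_jited_ksyms;
 *	dd.btf = btf;				// optional, enables func/line info
 *	kernel_syms_load(&dd);			// resolve call targets via kallsyms
 *	dump_xlated_plain(&dd, insns, insns_len, opcodes, linum);
 *	kernel_syms_destroy(&dd);
 */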