cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

relo_core.c (40694B)


// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Facebook */

#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#include "relo_core.h"

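/* When built into the kernel, the libbpf-style helpers used throughout
 * this file are mapped onto the kernel's in-tree BTF API (and pr_warn()
 * and friends onto bpf_log()) by the wrappers and macros below, so the
 * same relocation logic can be shared between libbpf and the kernel.
 */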
static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}

static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}

static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}

static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

#undef pr_warn
#undef pr_info
#undef pr_debug
#define pr_warn(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...)	bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
#else
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <linux/err.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#endif

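/* A flexible array member has zero declared elements and must be the
 * last member of its enclosing struct, e.g. (illustrative only):
 *
 *	struct pkt { int len; char data[]; };
 *
 * For such members, array-index bounds are not enforced below.
 */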
static bool is_flex_arr(const struct btf *btf,
			const struct bpf_core_accessor *acc,
			const struct btf_array *arr)
{
	const struct btf_type *t;

	/* not a flexible array, if not inside a struct or has non-zero size */
	if (!acc->name || arr->nelems > 0)
		return false;

	/* has to be the last member of enclosing struct */
	t = btf_type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
}

static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_CORE_FIELD_EXISTS: return "field_exists";
	case BPF_CORE_FIELD_SIGNED: return "signed";
	case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
	case BPF_CORE_TYPE_EXISTS: return "type_exists";
	case BPF_CORE_TYPE_SIZE: return "type_size";
	case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
	default: return "unknown";
	}
}

static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_TYPE_ID_LOCAL:
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return true;
	default:
		return false;
	}
}

/*
 * Turn bpf_core_relo into a low- and high-level spec representation,
 * validating correctness along the way, as well as calculating resulting
 * field bit offset, specified by accessor string. Low-level spec captures
 * every single level of nestedness, including traversing anonymous
 * struct/union members. High-level one only captures semantically meaningful
 * "turning points": named fields and array indices.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of access string (it's
 * just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
 * spec and raw_spec are kept empty.
 *
 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
 * string to specify enumerator's value index that needs to be relocated.
 */
int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			const struct bpf_core_relo *relo,
			struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name, *spec_str;
	__u32 id;
	__s64 sz;

	spec_str = btf__name_by_offset(btf, relo->access_str_off);
	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = relo->type_id;
	spec->relo_kind = relo->kind;

	/* type-based relocations don't have a field access string */
	if (core_relo_is_type_based(relo->kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, relo->type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo->kind)) {
		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		/* record enumerator name in a first accessor */
		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo->kind))
		return -EINVAL;

	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}

/* Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of enums should be anonymous;
 *   - for ENUMs, check sizes, names are ignored;
 *   - for INT, size and signedness are ignored;
 *   - any two FLOATs are always compatible;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both w/ same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		return 0;
	}
}
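
/* E.g., under the rules above, a local 'int a[7]' is field-compatible
 * with a target 'long a[9]': ARRAY dimensionality is ignored and any
 * two non-bitfield INTs are compatible regardless of size and
 * signedness.
 */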

/*
 * Given single high-level named field accessor in local type, find
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
 * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If field with
 * desired name is found, check compatibility between local and target types,
 * before returning result.
 *
 * 1 is returned, if field is found.
 * 0 is returned if no compatible field is found.
 * <0 is returned on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}
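
/* E.g. (an illustrative case): if the local spec accesses field 'a'
 * directly, but the target type moved 'a' into an embedded anonymous
 * union, the recursive search above still finds it; the target's raw
 * spec then grows by two indices (anonymous member, then 'a') while
 * its high-level spec grows by just one accessor.
 */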

/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		return bpf_core_types_are_compat(local_spec->btf,
						 local_spec->root_type_id,
						 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const struct btf_enum *e;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as array element
			 * type (because it's the original struct), for others
			 * we should find array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}

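/*
 * Calculate the value a field-based relocation resolves to for the given
 * spec, plus field size/type info used for load/store size adjustment,
 * and whether the instruction's expected value should be validated.
 *
 * Bitfield example (little-endian; an illustrative sketch, not tied to
 * any particular struct): for a 'u32 x:5' bitfield at bit offset 36,
 * byte_sz starts at mt->size == 4 and byte_off == 36 / 8 / 4 * 4 == 4;
 * since 36 + 5 - 4 * 8 == 9 <= 32, a single 4-byte load suffices, so
 * byte_off == 4, byte_sz == 4, LSHIFT_U64 == 64 - 9 == 55 and
 * RSHIFT_U64 == 64 - 5 == 59.
 */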
static int bpf_core_calc_field_relo(const char *prog_name,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog_name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog_name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_CORE_FIELD_SIGNED:
		/* enums will be assumed unsigned */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u32 *val, bool *validate)
{
	__s64 sz;

	/* by default, always check expected value in bpf_insn */
	if (validate)
		*validate = true;

	/* type-based relos return zero when target type is not found */
	if (!spec) {
		*val = 0;
		return 0;
	}

	switch (relo->kind) {
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID, embedded in bpf_insn, might change during linking,
		 * so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_CORE_TYPE_EXISTS:
		*val = 1;
		break;
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_CORE_TYPE_ID_LOCAL:
	/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
				      const struct bpf_core_spec *spec,
				      __u32 *val)
{
	const struct btf_type *t;
	const struct btf_enum *e;

	switch (relo->kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
		*val = spec ? 1 : 0;
		break;
	case BPF_CORE_ENUMVAL_VALUE:
		if (!spec)
			return -EUCLEAN; /* request instruction poisoning */
		t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
		e = btf_enum(t) + spec->spec[0].idx;
		*val = e->val;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Calculate original and target relocation values, given local and target
 * specs and relocation kind. These values are calculated for each candidate.
 * If there are multiple candidates, resulting values should all be consistent
 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 * If instruction has to be poisoned, *poison will be set to true.
 */
static int bpf_core_calc_relo(const char *prog_name,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;
		/* Validate if it's safe to adjust load/store memory size.
		 * Adjustments are performed only if original and new memory
		 * sizes differ.
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
			 *   - reading a 32-bit kernel pointer, while on the
			 *   BPF side pointers are always 64-bit; in this case
			 *   it's safe to "downsize" instruction size due to
			 *   pointer being treated as unsigned integer with
			 *   zero-extended upper 32-bits;
			 *   - reading unsigned integers, again because
			 *   zero-extension preserves the value correctly.
			 *
			 * In all other cases it's incorrect to attempt to
			 * load/store field because read value will be
			 * incorrect, so we poison relocated instruction.
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment, but this will
			 * only be checked for LDX/STX/ST insns
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* EUCLEAN is used to signal instruction poisoning request */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* EOPNOTSUPP means unknown/unsupported relocation */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}

/*
 * Turn instruction for which CO-RE relocation failed into invalid one with
 * distinct signature.
 */
static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
				 int insn_idx, struct bpf_insn *insn)
{
	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
		 prog_name, relo_idx, insn_idx);
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not dead code),
	 * verifier will complain with the following message:
	 * invalid func unknown#195896080
	 */
	insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
}

static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_DW: return 8;
	case BPF_W: return 4;
	case BPF_H: return 2;
	case BPF_B: return 1;
	default: return -1;
	}
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}
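
/* The two helpers above translate between BPF_SIZE() modes and byte
 * sizes; bpf_core_patch_insn() relies on them when a relocated
 * LDX/ST/STX instruction's memory access size has to be adjusted.
 */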

/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
 * For existence relocations target spec will be NULL if field/type is not found.
 * Expected insn->imm value is determined using relocation kind and local
 * spec, and is checked before patching instruction. If actual insn->imm value
 * is wrong, bail out with error.
 *
 * Currently supported classes of BPF instruction are:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 * 3. rX = <imm64> (load with 64-bit immediate value);
 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
 */
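/* E.g. (an illustrative sketch): a byte_off relocation on
 *
 *	r1 = *(u32 *)(r2 + 8);
 *
 * validates insn->off (8) against res->orig_val, rewrites it to
 * res->new_val, and, if the target field's size differs, also adjusts
 * BPF_SIZE(insn->code) via the res->new_sz handling below.
 */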
int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
			int insn_idx, const struct bpf_core_relo *relo,
			int relo_idx, const struct bpf_core_relo_res *res)
{
	__u32 orig_val, new_val;
	__u8 class;

	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison second part of ldimm64 to avoid confusing error from
		 * verifier about "unknown opcode 00"
		 */
		if (is_ldimm64_insn(insn))
			bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 orig_val, new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
			return -EINVAL;
		}
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
				prog_name, relo_idx, insn_idx, new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog_name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
			 prog_name, relo_idx, insn_idx, orig_val, new_val);

		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog_name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		if (!is_ldimm64_insn(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog_name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, (unsigned long long)imm,
				orig_val, new_val);
			return -EINVAL;
		}

		insn[0].imm = new_val;
		insn[1].imm = 0; /* currently only 32-bit values are supported */
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)imm, new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog_name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}

/* Output spec representation in the format:
 * <relo-kind> [<type-id>] <type-kind> <type-name><spec> (<raw-spec> @ offset <byte-offset>),
 * where <spec> is a C-syntax view of recorded field access, e.g.: .a[3].b
 */
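/* E.g., for the 'struct sample' example from bpf_core_parse_spec()'s
 * comment, a byte_off spec would be rendered along the lines of
 * (type ID 7 made up for illustration):
 *
 *	<byte_off> [7] struct sample.a[3] (0:1:2:3 @ offset 24)
 */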
int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	const char *s;
	__u32 type_id;
	int i, len = 0;

#define append_buf(fmt, args...)				\
	({							\
		int r;						\
		r = snprintf(buf, buf_sz, fmt, ##args);		\
		len += r;					\
		if (r >= buf_sz)				\
			r = buf_sz;				\
		buf += r;					\
		buf_sz -= r;					\
	})

	type_id = spec->root_type_id;
	t = btf_type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	append_buf("<%s> [%u] %s %s",
		   core_relo_kind_str(spec->relo_kind),
		   type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	if (core_relo_is_type_based(spec->relo_kind))
		return len;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		e = btf_enum(t) + spec->raw_spec[0];
		s = btf__name_by_offset(spec->btf, e->name_off);

		append_buf("::%s = %u", s, e->val);
		return len;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				append_buf(".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				append_buf("[%u]", spec->spec[i].idx);
		}

		append_buf(" (");
		for (i = 0; i < spec->raw_len; i++)
			append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
		else
			append_buf(" @ offset %u)", spec->bit_offset / 8);
		return len;
	}

	return len;
#undef append_buf
}

/*
 * Calculate CO-RE relocation target result.
 *
 * The outline and important points of the algorithm:
 * 1. For given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, to allow specifying and
 *    supporting incompatible variations of the same kernel struct, which
 *    might differ between different kernel versions and/or build
 *    configurations.
 *
 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 *    converter, when deduplicated BTF of a kernel still contains more than
 *    one different type with the same name. In that case, ___2, ___3, etc
 *    are appended starting from the second name conflict. But struct flavors
 *    are also useful when defined "locally", in the BPF program, to extract
 *    the same data across incompatible changes between different kernel
 *    versions/configurations. For instance, to handle field renames between
 *    kernel versions, one can use two flavors of the struct name with the
 *    same common name and use conditional relocations to extract that field,
 *    depending on target kernel version (see the sketch after this comment).
 * 2. For each candidate type, try to match local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match,
 *    as well as all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    imperfection of BTF deduplication, which can cause slight duplication of
 *    the same BTF type, if some directly or indirectly referenced (by
 *    pointer) type gets resolved to different actual types in different
 *    object files. If such situation occurs, deduplicated BTF will end up
 *    with two (or more) structurally identical types, which differ only in
 *    types they refer to through pointer. This should be OK in most cases and
 *    is not an error.
 * 4. Candidate types search is performed by linearly scanning through all
 *    types in target BTF. It is anticipated that this is overall more
 *    efficient memory-wise and not significantly worse (if not better)
 *    CPU-wise compared to prebuilding a map from all local type names to
 *    a list of candidate type names. It's also sped up by caching resolved
 *    list of matching candidates per each local "root" type ID, that has at
 *    least one bpf_core_relo associated with it. This list is shared
 *    between multiple relocations for the same type ID and is updated as some
 *    of the candidates are pruned due to structural incompatibility.
 */
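/* A minimal flavor sketch (names are illustrative, not from any real
 * kernel): to cope with a field rename across kernel versions, a BPF
 * program can locally define
 *
 *	struct sample___old { int fld_old; };
 *	struct sample___new { int fld_new; };
 *
 * and guard each access with bpf_core_field_exists() so that only the
 * variant matching the running kernel's 'struct sample' is relocated.
 */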
int bpf_core_calc_relo_insn(const char *prog_name,
			    const struct bpf_core_relo *relo,
			    int relo_idx,
			    const struct btf *local_btf,
			    struct bpf_core_cand_list *cands,
			    struct bpf_core_spec *specs_scratch,
			    struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_spec *local_spec = &specs_scratch[0];
	struct bpf_core_spec *cand_spec = &specs_scratch[1];
	struct bpf_core_spec *targ_spec = &specs_scratch[2];
	struct bpf_core_relo_res cand_res;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id;
	char spec_buf[256];
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
	if (err) {
		const char *spec_str;

		spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, btf_kind_str(local_type),
			str_is_empty(local_name) ? "<anon>" : local_name,
			spec_str ?: "<?>", err);
		return -EINVAL;
	}

	bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
	pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);

	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
	if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
		/* bpf_insn's imm value could get out of sync during linking */
		memset(targ_res, 0, sizeof(*targ_res));
		targ_res->validate = false;
		targ_res->poison = false;
		targ_res->orig_val = local_spec->root_type_id;
		targ_res->new_val = local_spec->root_type_id;
		return 0;
	}

	/* libbpf doesn't support candidate search for anonymous types */
	if (str_is_empty(local_name)) {
		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
		return -EOPNOTSUPP;
	}

	for (i = 0, j = 0; i < cands->len; i++) {
		err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
					  cands->cands[i].id, cand_spec);
		if (err < 0) {
			bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
			pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
				prog_name, relo_idx, i, spec_buf, err);
			return err;
		}

		bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
		pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
			 relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);

		if (err == 0)
			continue;

		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
		if (err)
			return err;

		if (j == 0) {
			*targ_res = cand_res;
			*targ_spec = *cand_spec;
		} else if (cand_spec->bit_offset != targ_spec->bit_offset) {
			/* if there are many field relo candidates, they
			 * should all resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec->bit_offset,
				targ_spec->bit_offset);
			return -EINVAL;
		} else if (cand_res.poison != targ_res->poison ||
			   cand_res.new_val != targ_res->new_val) {
			/* all candidates should result in the same relocation
			 * decision and value, otherwise it's dangerous to
			 * proceed due to ambiguity
			 */
			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
				prog_name, relo_idx,
				cand_res.poison ? "failure" : "success", cand_res.new_val,
				targ_res->poison ? "failure" : "success", targ_res->new_val);
			return -EINVAL;
		}

		cands->cands[j++] = cands->cands[i];
	}

	/*
	 * For a BPF_CORE_FIELD_EXISTS relo, or when the BPF program has its
	 * own field existence checks or kernel version/config checks, it's
	 * expected that we might not find any candidates. In that case, if
	 * the field wasn't found in any candidate, the list of candidates
	 * shouldn't change at all: we'll just handle the relocation
	 * appropriately, depending on the relo's kind.
	 */
	if (j > 0)
		cands->len = j;

	/*
	 * If no candidates were found, it might be either a programmer error
	 * or an expected case, depending on whether the instruction with the
	 * relocation is guarded in some way that makes it unreachable (dead
	 * code) if the relocation can't be resolved. This is handled
	 * uniformly in bpf_core_patch_insn() by replacing that instruction
	 * with a BPF helper call insn (using an invalid helper ID). If the
	 * instruction is indeed unreachable, it will be ignored and
	 * eliminated by the verifier. If it was an error, the verifier will
	 * complain and point to the specific instruction number in its log.
	 */
	if (j == 0) {
		pr_debug("prog '%s': relo #%d: no matching targets found\n",
			 prog_name, relo_idx);

		/* calculate single target relo result explicitly */
		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
		if (err)
			return err;
	}

	return 0;
}