cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arraymap.c (36883B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
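
/* User-space sketch of the attribute contract enforced by
 * array_map_alloc_check() above: key_size must be 4, value_size and
 * max_entries non-zero. Illustrative only; assumes the uapi bpf headers
 * are available and elides error handling.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_array_map(__u32 value_size, __u32 max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;		/* arrays only take u32 keys */
	attr.value_size = value_size;	/* > 0, <= KMALLOC_MAX_SIZE */
	attr.max_entries = max_entries;	/* > 0 */

	return (int)syscall(SYS_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}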

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
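
/* Kernel-side sketch of the index_mask computation above: round
 * max_entries up to the next power of two in u64 space and subtract one,
 * e.g. max_entries = 5 gives mask 7 (array rounded to 8 slots), and an
 * exact power of two such as 8 also gives mask 7.
 */
static u32 spec_v1_index_mask(u32 max_entries)
{
	u64 mask64 = fls_long(max_entries - 1);	/* position of highest set bit */

	mask64 = 1ULL << mask64;	/* next power of two, computed in u64 */
	return mask64 - 1;		/* all-ones mask covering the array */
}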

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
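
/* For reference, the sequence emitted above corresponds to the following
 * pseudo-listing (shown for the !bypass_spec_v1, power-of-2 elem_size
 * case):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)			// index
 *	if r0 >= max_entries goto miss		// bounds check
 *	r0 &= index_mask			// clamp speculation
 *	r0 <<= ilog2(elem_size)			// index -> byte offset
 *	r0 += r1				// &array->value[index]
 *	goto out
 * miss:
 *	r0 = 0					// NULL
 * out:
 */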

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
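
/* User-space sketch of the layout produced by bpf_percpu_array_copy()
 * above: a lookup on a per-cpu array fills one round_up(value_size, 8)
 * slot per possible CPU. Assumes libbpf's bpf_map_lookup_elem() wrapper,
 * a value_size of 8, and ncpus from e.g. libbpf_num_possible_cpus().
 */
#include <bpf/bpf.h>
#include <stdlib.h>

static __u64 *read_percpu_value(int map_fd, __u32 key, int ncpus)
{
	__u64 *values = calloc(ncpus, sizeof(*values));	/* one slot per CPU */

	if (values && bpf_map_lookup_elem(map_fd, &key, values)) {
		free(values);
		return NULL;
	}
	return values;
}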

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
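
/* User-space sketch of the iteration protocol implemented by
 * array_map_get_next_key() above: a NULL or out-of-range key yields
 * index 0, and -ENOENT after the last index ends the walk. Assumes
 * libbpf's bpf_map_get_next_key() wrapper.
 */
#include <bpf/bpf.h>

static void walk_array_keys(int map_fd)
{
	__u32 key, next;

	/* a NULL previous key makes get_next_key return the first index */
	for (int err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
	     err = bpf_map_get_next_key(map_fd, &key, &next))
		key = next;	/* visit element 'next' here */
}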

static void check_and_free_fields(struct bpf_array *arr, void *val)
{
	if (map_value_has_timer(&arr->map))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
	if (map_value_has_kptrs(&arr->map))
		bpf_map_free_kptrs(&arr->map, val);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_fields(array, val);
	}
	return 0;
}
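
/* BPF-program-side sketch of the flag semantics enforced above: every
 * array slot is pre-allocated, so BPF_NOEXIST always fails with -EEXIST
 * while BPF_ANY and BPF_EXIST both overwrite. Assumes bpf_helpers.h and
 * an array map passed in by the caller.
 */
static __always_inline void overwrite_slot(void *array_map, __u64 val)
{
	__u32 key = 0;

	/* always -EEXIST on arrays: the element already "exists" */
	if (bpf_map_update_elem(array_map, &key, &val, BPF_NOEXIST))
		bpf_map_update_elem(array_map, &key, &val, BPF_ANY);
}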

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned, or zeros that percpu_alloc zero-filled,
	 * so no kernel data leak is possible.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free kptr on uref dropping to zero. */
	if (!map_value_has_timer(map))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
					  map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (map_value_has_kptrs(map)) {
		for (i = 0; i < array->map.max_entries; i++)
			bpf_map_free_kptrs(map, array->value + array->elem_size * i);
		bpf_map_free_kptr_off_tab(map);
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
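
/* User-space sketch: a map created with BPF_F_MMAPABLE can be mapped
 * directly, avoiding lookup/update syscalls. Per the check above, the
 * mapping must fit within PAGE_ALIGN(max_entries * round_up(value_size,
 * 8)) bytes starting at page offset 0 of the value area.
 */
#include <sys/mman.h>

static void *mmap_array_values(int map_fd, size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       map_fd, 0);

	return p == MAP_FAILED ? NULL : p;
}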

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array->value + array->elem_size * i;
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
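
/* BPF-program-side sketch of the callback contract used by
 * bpf_for_each_array_elem() above: return 0 to continue, 1 to stop.
 * Assumes bpf_helpers.h, a u64 value type, and an array map passed in
 * by the caller.
 */
static long count_nonzero_cb(void *map, __u32 *key, __u64 *val, void *ctx)
{
	if (*val)
		(*(__u32 *)ctx)++;
	return 0;	/* 0 - continue, 1 - stop and return */
}

static __always_inline __u32 count_nonzero(void *array_map)
{
	__u32 n = 0;

	/* invokes count_nonzero_cb once per element; flags must be 0 */
	bpf_for_each_map_elem(array_map, count_nonzero_cb, &n, 0);
	return n;
}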

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also, programs reaching a refcount of zero while
			 *    patching is in progress are okay since we're protected
			 *    under poke_mutex and untrack the programs before the
			 *    JIT buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish executing the program
				 * so that it will not be possible to expose
				 * them to an invalid nop, stack unwind, or nop
				 * state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_id = &array_map_btf_ids[0],
};
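
/* BPF-program-side sketch: prog arrays back bpf_tail_call(), and the
 * poke infrastructure above lets the JIT patch such calls into direct
 * jumps once a slot is known. Assumes bpf_helpers.h and a prog array
 * populated with program fds from user space; map and function names
 * are illustrative.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __always_inline int dispatch(struct xdp_md *ctx, __u32 slot)
{
	bpf_tail_call(ctx, &jmp_table, slot);
	/* only reached if 'slot' is empty or out of range */
	return XDP_PASS;
}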

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
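
/* BPF-program-side sketch: perf event arrays are typically consumed via
 * bpf_perf_event_output(), which selects the current CPU's slot with
 * BPF_F_CURRENT_CPU. Assumes bpf_helpers.h; the map, struct, and
 * function names are illustrative.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event { __u32 pid; };

static __always_inline void emit_event(void *ctx, __u32 pid)
{
	struct event e = { .pid = pid };

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &e, sizeof(e));
}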

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
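
/* BPF-program-side sketch: a lookup in an array-of-maps yields an inner
 * map pointer that is then queried like any other map, which is exactly
 * the double lookup array_of_map_gen_lookup() above inlines. Assumes
 * bpf_helpers.h; map names and value types are illustrative.
 */
struct inner_array {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__type(key, __u32);
	__array(values, struct inner_array);
} outer SEC(".maps");

static __always_inline __u64 *inner_lookup(__u32 outer_idx, __u32 inner_key)
{
	void *inner = bpf_map_lookup_elem(&outer, &outer_idx);

	if (!inner)
		return NULL;
	return bpf_map_lookup_elem(inner, &inner_key);
}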