cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bpf-lirc.c (7775B)


// SPDX-License-Identifier: GPL-2.0
// bpf-lirc.c - handles bpf
//
// Copyright (C) 2018 Sean Young <sean@mess.org>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf_lirc.h>
#include "rc-core-priv.h"

#define lirc_rcu_dereference(p)						\
	rcu_dereference_protected(p, lockdep_is_held(&ir_raw_handler_lock))

/*
 * BPF interface for raw IR
 */
const struct bpf_prog_ops lirc_mode2_prog_ops = {
};

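/*
 * The context handed to a lirc_mode2 program is a pointer to
 * raw->bpf_sample, so the helpers below can recover the
 * ir_raw_event_ctrl (and from it the rc_dev) with container_of().
 */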
BPF_CALL_1(bpf_rc_repeat, u32*, sample)
{
	struct ir_raw_event_ctrl *ctrl;

	ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

	rc_repeat(ctrl->dev);

	return 0;
}

static const struct bpf_func_proto rc_repeat_proto = {
	.func	   = bpf_rc_repeat,
	.gpl_only  = true, /* rc_repeat is EXPORT_SYMBOL_GPL */
	.ret_type  = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
	   u32, toggle)
{
	struct ir_raw_event_ctrl *ctrl;

	ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

	rc_keydown(ctrl->dev, protocol, scancode, toggle != 0);

	return 0;
}

static const struct bpf_func_proto rc_keydown_proto = {
	.func	   = bpf_rc_keydown,
	.gpl_only  = true, /* rc_keydown is EXPORT_SYMBOL_GPL */
	.ret_type  = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_rc_pointer_rel, u32*, sample, s32, rel_x, s32, rel_y)
{
	struct ir_raw_event_ctrl *ctrl;

	ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

	input_report_rel(ctrl->dev->input_dev, REL_X, rel_x);
	input_report_rel(ctrl->dev->input_dev, REL_Y, rel_y);
	input_sync(ctrl->dev->input_dev);

	return 0;
}

static const struct bpf_func_proto rc_pointer_rel_proto = {
	.func	   = bpf_rc_pointer_rel,
	.gpl_only  = true,
	.ret_type  = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

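/*
 * Helpers a BPF_PROG_TYPE_LIRC_MODE2 program is allowed to call;
 * returning NULL makes the verifier reject any other helper.
 */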
static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_rc_repeat:
		return &rc_repeat_proto;
	case BPF_FUNC_rc_keydown:
		return &rc_keydown_proto;
	case BPF_FUNC_rc_pointer_rel:
		return &rc_pointer_rel_proto;
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_trace_printk:
		if (perfmon_capable())
			return bpf_get_trace_printk_proto();
		fallthrough;
	default:
		return NULL;
	}
}

static bool lirc_mode2_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	/* We have one field of u32 */
	return type == BPF_READ && off == 0 && size == sizeof(u32);
}

const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
	.get_func_proto  = lirc_mode2_func_proto,
	.is_valid_access = lirc_mode2_is_valid_access
};

#define BPF_MAX_PROGS 64

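/*
 * Append @prog to the device's program array.  The array is rebuilt
 * copy-on-write under ir_raw_handler_lock, capped at BPF_MAX_PROGS
 * entries, and published with rcu_assign_pointer().
 */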
static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	struct ir_raw_event_ctrl *raw;
	int ret;

	if (rcdev->driver_type != RC_DRIVER_IR_RAW)
		return -EINVAL;

	ret = mutex_lock_interruptible(&ir_raw_handler_lock);
	if (ret)
		return ret;

	raw = rcdev->raw;
	if (!raw) {
		ret = -ENODEV;
		goto unlock;
	}

	old_array = lirc_rcu_dereference(raw->progs);
	if (old_array && bpf_prog_array_length(old_array) >= BPF_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, 0, &new_array);
	if (ret < 0)
		goto unlock;

	rcu_assign_pointer(raw->progs, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&ir_raw_handler_lock);
	return ret;
}

static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	struct ir_raw_event_ctrl *raw;
	int ret;

	if (rcdev->driver_type != RC_DRIVER_IR_RAW)
		return -EINVAL;

	ret = mutex_lock_interruptible(&ir_raw_handler_lock);
	if (ret)
		return ret;

	raw = rcdev->raw;
	if (!raw) {
		ret = -ENODEV;
		goto unlock;
	}

	old_array = lirc_rcu_dereference(raw->progs);
	ret = bpf_prog_array_copy(old_array, prog, NULL, 0, &new_array);
	/*
	 * Do not use bpf_prog_array_delete_safe() as we would end up
	 * with a dummy entry in the array, and then we would free the
	 * dummy in lirc_bpf_free()
	 */
	if (ret)
		goto unlock;

	rcu_assign_pointer(raw->progs, new_array);
	bpf_prog_array_free(old_array);
	bpf_prog_put(prog);
unlock:
	mutex_unlock(&ir_raw_handler_lock);
	return ret;
}

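/*
 * Run all attached programs for one raw IR sample.  The sample is
 * stored in the ctrl struct first so the helpers can map the context
 * pointer back to the device.
 */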
void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
{
	struct ir_raw_event_ctrl *raw = rcdev->raw;

	raw->bpf_sample = sample;

	if (raw->progs) {
		rcu_read_lock();
		bpf_prog_run_array(rcu_dereference(raw->progs),
				   &raw->bpf_sample, bpf_prog_run);
		rcu_read_unlock();
	}
}

/*
 * This should be called once the rc thread has been stopped, so there can be
 * no concurrent bpf execution.
 *
 * Should be called with the ir_raw_handler_lock held.
 */
void lirc_bpf_free(struct rc_dev *rcdev)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *array;

	array = lirc_rcu_dereference(rcdev->raw->progs);
	if (!array)
		return;

	for (item = array->items; item->prog; item++)
		bpf_prog_put(item->prog);

	bpf_prog_array_free(array);
}

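/*
 * Entry points for the bpf() syscall (BPF_PROG_ATTACH, BPF_PROG_DETACH
 * and BPF_PROG_QUERY) when the target fd is a lirc chardev.
 */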
int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct rc_dev *rcdev;
	int ret;

	if (attr->attach_flags)
		return -EINVAL;

	rcdev = rc_dev_get_from_fd(attr->target_fd);
	if (IS_ERR(rcdev))
		return PTR_ERR(rcdev);

	ret = lirc_bpf_attach(rcdev, prog);

	put_device(&rcdev->dev);

	return ret;
}

int lirc_prog_detach(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct rc_dev *rcdev;
	int ret;

	if (attr->attach_flags)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd,
				 BPF_PROG_TYPE_LIRC_MODE2);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	rcdev = rc_dev_get_from_fd(attr->target_fd);
	if (IS_ERR(rcdev)) {
		bpf_prog_put(prog);
		return PTR_ERR(rcdev);
	}

	ret = lirc_bpf_detach(rcdev, prog);

	bpf_prog_put(prog);
	put_device(&rcdev->dev);

	return ret;
}

int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	struct bpf_prog_array *progs;
	struct rc_dev *rcdev;
	u32 cnt, flags = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	rcdev = rc_dev_get_from_fd(attr->query.target_fd);
	if (IS_ERR(rcdev))
		return PTR_ERR(rcdev);

	if (rcdev->driver_type != RC_DRIVER_IR_RAW) {
		ret = -EINVAL;
		goto put;
	}

	ret = mutex_lock_interruptible(&ir_raw_handler_lock);
	if (ret)
		goto put;

	progs = lirc_rcu_dereference(rcdev->raw->progs);
	cnt = progs ? bpf_prog_array_length(progs) : 0;

	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
		ret = -EFAULT;
		goto unlock;
	}

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) {
		ret = -EFAULT;
		goto unlock;
	}

	if (attr->query.prog_cnt != 0 && prog_ids && cnt)
		ret = bpf_prog_array_copy_to_user(progs, prog_ids,
						  attr->query.prog_cnt);

unlock:
	mutex_unlock(&ir_raw_handler_lock);
put:
	put_device(&rcdev->dev);

	return ret;
}
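
For reference, a lirc_mode2 program that these hooks run might look roughly like the sketch below. It is loosely modeled on the kernel's BPF selftests; the pulse-length threshold, protocol and scancode values are placeholders, not a real decoder. Userspace would attach it by passing the program fd and a lirc chardev fd to bpf(BPF_PROG_ATTACH, ...), which ends up in lirc_prog_attach() above.

// SPDX-License-Identifier: GPL-2.0
// Minimal lirc_mode2 decoder sketch (illustrative values only).
#include <linux/bpf.h>
#include <linux/lirc.h>
#include <bpf/bpf_helpers.h>

SEC("lirc_mode2")
int ir_decoder(unsigned int *sample)
{
	/* The context is the single u32 raw IR sample in LIRC mode2 format. */
	if (LIRC_IS_PULSE(*sample)) {
		unsigned int duration = LIRC_VALUE(*sample);

		/* Placeholder decode: report a keypress for long pulses. */
		if (duration > 20000)
			bpf_rc_keydown(sample, 0x40 /* placeholder protocol */,
				       duration & 0xffff /* placeholder scancode */,
				       0);
	}

	return 0;
}

/* rc_keydown() is EXPORT_SYMBOL_GPL, so the helper requires a GPL program. */
char _license[] SEC("license") = "GPL";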