From e40fbbf0572c5e41dc87ad79001748ed399ce32d Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Wed, 19 Jan 2022 11:44:40 +0000 Subject: uapi/bpf: Add missing description and returns for helper documentation Both the description and returns sections will become mandatory for helpers and syscalls in a later commit, in order to generate man pages. This commit also adds to the documentation that BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN, for anyone searching for the syscall in the generated man pages. Signed-off-by: Usama Arif Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220119114442.1452088-1-usama.arif@bytedance.com --- include/uapi/linux/bpf.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b0383d371b9a..a9c96c21330a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -330,6 +330,8 @@ union bpf_iter_link_info { * *ctx_out*, *data_in* and *data_out* must be NULL. * *repeat* must be zero. * + * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN. + * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. @@ -1775,6 +1777,8 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. * * u64 bpf_get_current_pid_tgid(void) + * Description + * Get the current pid and tgid. * Return * A 64-bit integer containing the current tgid and pid, and * created as such: *current_task*\ **->tgid << 32 \|** * *current_task*\ **->pid**. * * u64 bpf_get_current_uid_gid(void) + * Description + * Get the current uid and gid. * Return * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. @@ -2256,6 +2262,8 @@ union bpf_attr { * The 32-bit hash. * * u64 bpf_get_current_task(void) + * Description + * Get the current task. * Return * A pointer to the current task struct. * @@ -2369,6 +2377,8 @@ union bpf_attr { * indicate that the hash is outdated and to trigger a * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. + * Return + * void. * * long bpf_get_numa_node_id(void) * Description @@ -2466,6 +2476,8 @@ union bpf_attr { * An 8-byte long unique number or 0 if *sk* is NULL. * * u32 bpf_get_socket_uid(struct sk_buff *skb) + * Description + * Get the owner UID of the socket associated to *skb*. * Return * The owner UID of the socket associated to *skb*. If the socket * is **NULL**, or if it is not a full socket (i.e. if it is a @@ -3240,6 +3252,9 @@ union bpf_attr { * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_get_current_cgroup_id(void) + * Description + * Get the current cgroup id based on the cgroup within which + * the current task is running. * Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. -- cgit v1.2.3-71-gd317 From b44123b4a3dcad4664d3a0f72c011ffd4c9c4d93 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 16 Dec 2021 02:04:27 +0000 Subject: bpf: Add cgroup helpers bpf_{get,set}_retval to get/set syscall return value The helpers continue to use int for retval because all the hooks are int-returning rather than long-returning. The return value of bpf_set_retval is int for future-proofing, in case setting the retval can fail in the future.
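For illustration, a minimal cgroup program combining the two helpers might look like this (hypothetical sketch, not part of this patch; it assumes a libbpf toolchain recent enough to expose the new helpers, and the paragraphs below define the exact semantics it relies on):

    /* Hypothetical example: reject setsockopt() with -EINVAL instead of
     * the default -EPERM.
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup/setsockopt")
    int reject_einval(struct bpf_sockopt *ctx)
    {
            bpf_set_retval(-22 /* -EINVAL */);
            return 0;       /* 0 == reject; the retval set above is preserved */
    }

    char LICENSE[] SEC("license") = "GPL";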
After the previous patch, if a program rejects a syscall by returning 0, -EPERM is generated regardless of whether the retval is already set to -err. This patch changes that: -EPERM is forced only if the retval is not already -err. This is because we want to support, for example, invoking bpf_set_retval(-EINVAL) and returning 0, and have the syscall return value be -EINVAL rather than -EPERM. For BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY, the prior behavior is that, if the return value is NET_XMIT_DROP, the packet is silently dropped. We preserve this behavior for backward compatibility reasons, so even if an errno is set, the errno is not returned to the caller. However, a non-error value set in retval cannot be propagated here, so this is not allowed and we return -EFAULT in that case. Signed-off-by: YiFei Zhu Reviewed-by: Stanislav Fomichev Link: https://lore.kernel.org/r/b4013fd5d16bed0b01977c1fafdeae12e1de61fb.1639619851.git.zhuyifei@google.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 10 ++++++---- include/uapi/linux/bpf.h | 18 ++++++++++++++++++ kernel/bpf/cgroup.c | 38 +++++++++++++++++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 18 ++++++++++++++++++ 4 files changed, 79 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 7b0c11f414d0..dce54eb0aae8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1299,7 +1299,7 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; func_ret = run_prog(prog, ctx); - if (!(func_ret & 1)) + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval)) run_ctx.retval = -EPERM; *(ret_flags) |= (func_ret >> 1); item++; @@ -1329,7 +1329,7 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; - if (!run_prog(prog, ctx)) + if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval)) run_ctx.retval = -EPERM; item++; } @@ -1389,7 +1389,7 @@ out: * 0: NET_XMIT_SUCCESS skb should be transmitted * 1: NET_XMIT_DROP skb should be dropped and cn * 2: NET_XMIT_CN skb should be transmitted and cn - * 3: -EPERM skb should be dropped + * 3: -err skb should be dropped */ #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ ({ \ @@ -1398,10 +1398,12 @@ out: u32 _flags = 0; \ bool _cn; \ u32 _ret; \ _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ + if (_ret && !IS_ERR_VALUE((long)_ret)) \ + _ret = -EFAULT; \ if (!_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ else \ - _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ + _ret = (_cn ? NET_XMIT_DROP : _ret); \ _ret; \ }) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a9c96c21330a..fe2272defcd9 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5033,6 +5033,22 @@ union bpf_attr { * * Return * The number of arguments of the traced function. + * + * int bpf_get_retval(void) + * Description + * Get the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * The syscall's return value. + * + * int bpf_set_retval(int retval) + * Description + * Set the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * 0 on success, or a negative error in case of failure.
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5221,6 +5237,8 @@ union bpf_attr { FN(get_func_arg), \ FN(get_func_ret), \ FN(get_func_arg_cnt), \ + FN(get_retval), \ + FN(set_retval), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index b6fad0bbf5a7..279ebbed75a5 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1044,7 +1044,7 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr * NET_XMIT_CN (2) - continue with packet output and notify TCP * to call cwr - * -EPERM - drop packet + * -err - drop packet * * For ingress packets, this function will return -EPERM if any * attached program was found and if it returned != 1 during execution. @@ -1080,6 +1080,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, } else { ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb, 0); + if (ret && !IS_ERR_VALUE((long)ret)) + ret = -EFAULT; } bpf_restore_data_end(skb, saved_data_end); __skb_pull(skb, offset); @@ -1205,6 +1207,36 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, return ret; } +BPF_CALL_0(bpf_get_retval) +{ + struct bpf_cg_run_ctx *ctx = + container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); + + return ctx->retval; +} + +static const struct bpf_func_proto bpf_get_retval_proto = { + .func = bpf_get_retval, + .gpl_only = false, + .ret_type = RET_INTEGER, +}; + +BPF_CALL_1(bpf_set_retval, int, retval) +{ + struct bpf_cg_run_ctx *ctx = + container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx); + + ctx->retval = retval; + return 0; +} + +static const struct bpf_func_proto bpf_set_retval_proto = { + .func = bpf_set_retval, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto * cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -1217,6 +1249,10 @@ cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_cgroup_id_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; + case BPF_FUNC_get_retval: + return &bpf_get_retval_proto; + case BPF_FUNC_set_retval: + return &bpf_set_retval_proto; default: return bpf_base_func_proto(func_id); } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a9c96c21330a..fe2272defcd9 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5033,6 +5033,22 @@ union bpf_attr { * * Return * The number of arguments of the traced function. + * + * int bpf_get_retval(void) + * Description + * Get the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * The syscall's return value. + * + * int bpf_set_retval(int retval) + * Description + * Set the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * 0 on success, or a negative error in case of failure. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5221,6 +5237,8 @@ union bpf_attr { FN(get_func_arg), \ FN(get_func_ret), \ FN(get_func_arg_cnt), \ + FN(get_retval), \ + FN(set_retval), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317 From c2f2cdbeffda7b153c19e0f3d73149c41026c0db Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 21 Jan 2022 11:09:52 +0100 Subject: bpf: introduce BPF_F_XDP_HAS_FRAGS flag in prog_flags loading the ebpf program Introduce BPF_F_XDP_HAS_FRAGS and the related field in bpf_prog_aux in order to notify the driver that the loaded program supports xdp frags. Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/db2e8075b7032a356003f407d1b0deb99adaa0ed.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 5 +++++ kernel/bpf/syscall.c | 4 +++- tools/include/uapi/linux/bpf.h | 5 +++++ 4 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 80e3387ea3af..e93ed028a030 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -933,6 +933,7 @@ struct bpf_prog_aux { bool func_proto_unreliable; bool sleepable; bool tail_call_reachable; + bool xdp_has_frags; struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index fe2272defcd9..945649c67e03 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1113,6 +1113,11 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully supports xdp frags. + */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9e0631f091a6..f29090643c6e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2217,7 +2217,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) BPF_F_ANY_ALIGNMENT | BPF_F_TEST_STATE_FREQ | BPF_F_SLEEPABLE | - BPF_F_TEST_RND_HI32)) + BPF_F_TEST_RND_HI32 | + BPF_F_XDP_HAS_FRAGS)) return -EINVAL; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && @@ -2303,6 +2304,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) prog->aux->dst_prog = dst_prog; prog->aux->offload_requested = !!attr->prog_ifindex; prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; + prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; err = security_bpf_prog_alloc(prog->aux); if (err) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index fe2272defcd9..945649c67e03 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1113,6 +1113,11 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully supports xdp frags.
+ */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * -- cgit v1.2.3-71-gd317 From 0165cc817075cf701e4289838f1d925ff1911b3e Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 21 Jan 2022 11:09:54 +0100 Subject: bpf: introduce bpf_xdp_get_buff_len helper Introduce bpf_xdp_get_buff_len helper in order to return the xdp buffer total size (linear and paged area) Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/aac9ac3504c84026cf66a3c71b7c5ae89bc991be.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- include/net/xdp.h | 14 ++++++++++++++ include/uapi/linux/bpf.h | 7 +++++++ net/core/filter.c | 15 +++++++++++++++ tools/include/uapi/linux/bpf.h | 7 +++++++ 4 files changed, 43 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/xdp.h b/include/net/xdp.h index 8463dea8b4db..52b593321956 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -145,6 +145,20 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp) return (struct skb_shared_info *)xdp_data_hard_end(xdp); } +static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp) +{ + unsigned int len = xdp->data_end - xdp->data; + struct skb_shared_info *sinfo; + + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + + sinfo = xdp_get_shared_info_from_buff(xdp); + len += sinfo->xdp_frags_size; +out: + return len; +} + struct xdp_frame { void *data; u16 len; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 945649c67e03..5a28772063f6 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5054,6 +5054,12 @@ union bpf_attr { * This helper is currently supported by cgroup programs only. * Return * 0 on success, or a negative error in case of failure. + * + * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) + * Description + * Get the total size of a given xdp buff (linear and paged area) + * Return + * The total size of a given xdp buffer. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5244,6 +5250,7 @@ union bpf_attr { FN(get_func_arg_cnt), \ FN(get_retval), \ FN(set_retval), \ + FN(xdp_get_buff_len), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/net/core/filter.c b/net/core/filter.c index f73a84c75970..a7f03bbca465 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3783,6 +3783,19 @@ static const struct bpf_func_proto sk_skb_change_head_proto = { .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; + +BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp) +{ + return xdp_get_buff_len(xdp); +} + +static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = { + .func = bpf_xdp_get_buff_len, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) { return xdp_data_meta_unsupported(xdp) ? 
0 : @@ -7533,6 +7546,8 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_redirect_map_proto; case BPF_FUNC_xdp_adjust_tail: return &bpf_xdp_adjust_tail_proto; + case BPF_FUNC_xdp_get_buff_len: + return &bpf_xdp_get_buff_len_proto; case BPF_FUNC_fib_lookup: return &bpf_xdp_fib_lookup_proto; case BPF_FUNC_check_mtu: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 945649c67e03..5a28772063f6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5054,6 +5054,12 @@ union bpf_attr { * This helper is currently supported by cgroup programs only. * Return * 0 on success, or a negative error in case of failure. + * + * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) + * Description + * Get the total size of a given xdp buff (linear and paged area) + * Return + * The total size of a given xdp buffer. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5244,6 +5250,7 @@ union bpf_attr { FN(get_func_arg_cnt), \ FN(get_retval), \ FN(set_retval), \ + FN(xdp_get_buff_len), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317 From 3f364222d032eea6b245780e845ad213dab28cdd Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 21 Jan 2022 11:10:03 +0100 Subject: net: xdp: introduce bpf_xdp_pointer utility routine Similar to skb_header_pointer, introduce bpf_xdp_pointer utility routine to return a pointer to a given position in the xdp_buff if the requested area (offset + len) is contained in a contiguous memory area otherwise it will be copied in a bounce buffer provided by the caller. Similar to the tc counterpart, introduce the two following xdp helpers: - bpf_xdp_load_bytes - bpf_xdp_store_bytes Reviewed-by: Eelco Chaudron Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Acked-by: Jakub Kicinski Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/ab285c1efdd5b7a9d361348b1e7d3ef49f6382b3.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 18 +++++ net/core/filter.c | 176 ++++++++++++++++++++++++++++++++--------- tools/include/uapi/linux/bpf.h | 18 +++++ 3 files changed, 174 insertions(+), 38 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5a28772063f6..16a7574292a5 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5060,6 +5060,22 @@ union bpf_attr { * Get the total size of a given xdp buff (linear and paged area) * Return * The total size of a given xdp buffer. + * + * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * This helper is provided as an easy way to load data from a + * xdp buffer. It can be used to load *len* bytes from *offset* from + * the frame associated to *xdp_md*, into the buffer pointed by + * *buf*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * Store *len* bytes from buffer *buf* into the frame + * associated to *xdp_md*, at *offset*. + * Return + * 0 on success, or a negative error in case of failure. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5251,6 +5267,8 @@ union bpf_attr { FN(get_retval), \ FN(set_retval), \ FN(xdp_get_buff_len), \ + FN(xdp_load_bytes), \ + FN(xdp_store_bytes), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/net/core/filter.c b/net/core/filter.c index e4ce138bf925..945ccaaab3cb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3839,6 +3839,138 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; +static void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, + void *buf, unsigned long len, bool flush) +{ + unsigned long ptr_len, ptr_off = 0; + skb_frag_t *next_frag, *end_frag; + struct skb_shared_info *sinfo; + void *src, *dst; + u8 *ptr_buf; + + if (likely(xdp->data_end - xdp->data >= off + len)) { + src = flush ? buf : xdp->data + off; + dst = flush ? xdp->data + off : buf; + memcpy(dst, src, len); + return; + } + + sinfo = xdp_get_shared_info_from_buff(xdp); + end_frag = &sinfo->frags[sinfo->nr_frags]; + next_frag = &sinfo->frags[0]; + + ptr_len = xdp->data_end - xdp->data; + ptr_buf = xdp->data; + + while (true) { + if (off < ptr_off + ptr_len) { + unsigned long copy_off = off - ptr_off; + unsigned long copy_len = min(len, ptr_len - copy_off); + + src = flush ? buf : ptr_buf + copy_off; + dst = flush ? ptr_buf + copy_off : buf; + memcpy(dst, src, copy_len); + + off += copy_len; + len -= copy_len; + buf += copy_len; + } + + if (!len || next_frag == end_frag) + break; + + ptr_off += ptr_len; + ptr_buf = skb_frag_address(next_frag); + ptr_len = skb_frag_size(next_frag); + next_frag++; + } +} + +static void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + u32 size = xdp->data_end - xdp->data; + void *addr = xdp->data; + int i; + + if (unlikely(offset > 0xffff || len > 0xffff)) + return ERR_PTR(-EFAULT); + + if (offset + len > xdp_get_buff_len(xdp)) + return ERR_PTR(-EINVAL); + + if (offset < size) /* linear area */ + goto out; + + offset -= size; + for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ + u32 frag_size = skb_frag_size(&sinfo->frags[i]); + + if (offset < frag_size) { + addr = skb_frag_address(&sinfo->frags[i]); + size = frag_size; + break; + } + offset -= frag_size; + } +out: + return offset + len < size ? 
addr + offset : NULL; +} + +BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset, + void *, buf, u32, len) +{ + void *ptr; + + ptr = bpf_xdp_pointer(xdp, offset, len); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + if (!ptr) + bpf_xdp_copy_buf(xdp, offset, buf, len, false); + else + memcpy(buf, ptr, len); + + return 0; +} + +static const struct bpf_func_proto bpf_xdp_load_bytes_proto = { + .func = bpf_xdp_load_bytes, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, +}; + +BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset, + void *, buf, u32, len) +{ + void *ptr; + + ptr = bpf_xdp_pointer(xdp, offset, len); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + if (!ptr) + bpf_xdp_copy_buf(xdp, offset, buf, len, true); + else + memcpy(ptr, buf, len); + + return 0; +} + +static const struct bpf_func_proto bpf_xdp_store_bytes_proto = { + .func = bpf_xdp_store_bytes, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, +}; + static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); @@ -4677,48 +4809,12 @@ static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = { }; #endif -static unsigned long bpf_xdp_copy(void *dst_buff, const void *ctx, +static unsigned long bpf_xdp_copy(void *dst, const void *ctx, unsigned long off, unsigned long len) { struct xdp_buff *xdp = (struct xdp_buff *)ctx; - unsigned long ptr_len, ptr_off = 0; - skb_frag_t *next_frag, *end_frag; - struct skb_shared_info *sinfo; - u8 *ptr_buf; - - if (likely(xdp->data_end - xdp->data >= off + len)) { - memcpy(dst_buff, xdp->data + off, len); - return 0; - } - - sinfo = xdp_get_shared_info_from_buff(xdp); - end_frag = &sinfo->frags[sinfo->nr_frags]; - next_frag = &sinfo->frags[0]; - - ptr_len = xdp->data_end - xdp->data; - ptr_buf = xdp->data; - - while (true) { - if (off < ptr_off + ptr_len) { - unsigned long copy_off = off - ptr_off; - unsigned long copy_len = min(len, ptr_len - copy_off); - - memcpy(dst_buff, ptr_buf + copy_off, copy_len); - - off += copy_len; - len -= copy_len; - dst_buff += copy_len; - } - - if (!len || next_frag == end_frag) - break; - - ptr_off += ptr_len; - ptr_buf = skb_frag_address(next_frag); - ptr_len = skb_frag_size(next_frag); - next_frag++; - } + bpf_xdp_copy_buf(xdp, off, dst, len, false); return 0; } @@ -7660,6 +7756,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_adjust_tail_proto; case BPF_FUNC_xdp_get_buff_len: return &bpf_xdp_get_buff_len_proto; + case BPF_FUNC_xdp_load_bytes: + return &bpf_xdp_load_bytes_proto; + case BPF_FUNC_xdp_store_bytes: + return &bpf_xdp_store_bytes_proto; case BPF_FUNC_fib_lookup: return &bpf_xdp_fib_lookup_proto; case BPF_FUNC_check_mtu: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 5a28772063f6..16a7574292a5 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5060,6 +5060,22 @@ union bpf_attr { * Get the total size of a given xdp buff (linear and paged area) * Return * The total size of a given xdp buffer. + * + * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * This helper is provided as an easy way to load data from a + * xdp buffer. 
It can be used to load *len* bytes from *offset* from + * the frame associated to *xdp_md*, into the buffer pointed by + * *buf*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * Store *len* bytes from buffer *buf* into the frame + * associated to *xdp_md*, at *offset*. + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5251,6 +5267,8 @@ union bpf_attr { FN(get_retval), \ FN(set_retval), \ FN(xdp_get_buff_len), \ + FN(xdp_load_bytes), \ + FN(xdp_store_bytes), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317 From 376040e47334c6dc6a939a32197acceb00fe4acf Mon Sep 17 00:00:00 2001 From: Kenny Yu Date: Mon, 24 Jan 2022 10:54:01 -0800 Subject: bpf: Add bpf_copy_from_user_task() helper This adds a helper for bpf programs to read the memory of other tasks. As an example use case at Meta, we are using a bpf task iterator program and this new helper to print C++ async stack traces for all threads of a given process. Signed-off-by: Kenny Yu Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220124185403.468466-3-kennyyu@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 11 +++++++++++ kernel/bpf/helpers.c | 34 ++++++++++++++++++++++++++++++++++ kernel/trace/bpf_trace.c | 2 ++ tools/include/uapi/linux/bpf.h | 11 +++++++++++ 5 files changed, 59 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8c92c974bd12..394305a5e02f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2243,6 +2243,7 @@ extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; extern const struct bpf_func_proto bpf_find_vma_proto; extern const struct bpf_func_proto bpf_loop_proto; extern const struct bpf_func_proto bpf_strncmp_proto; +extern const struct bpf_func_proto bpf_copy_from_user_task_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 16a7574292a5..4a2f7041ebae 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5076,6 +5076,16 @@ union bpf_attr { * associated to *xdp_md*, at *offset*. * Return * 0 on success, or a negative error in case of failure. + * + * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) + * Description + * Read *size* bytes from user space address *user_ptr* in *tsk*'s + * address space, and stores the data in *dst*. *flags* is not + * used yet and is provided for future extensibility. This helper + * can only be used by sleepable programs. + * Return + * 0 on success, or a negative error in case of failure. On error + * *dst* buffer is zeroed out. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5269,6 +5279,7 @@ union bpf_attr { FN(xdp_get_buff_len), \ FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ + FN(copy_from_user_task), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 01cfdf40c838..ed2780b76cc1 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "../../lib/kstrtox.h" @@ -671,6 +672,39 @@ const struct bpf_func_proto bpf_copy_from_user_proto = { .arg3_type = ARG_ANYTHING, }; +BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, + const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) +{ + int ret; + + /* flags is not used yet */ + if (unlikely(flags)) + return -EINVAL; + + if (unlikely(!size)) + return 0; + + ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); + if (ret == size) + return 0; + + memset(dst, 0, size); + /* Return -EFAULT for partial read */ + return ret < 0 ? ret : -EFAULT; +} + +const struct bpf_func_proto bpf_copy_from_user_task_proto = { + .func = bpf_copy_from_user_task, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_BTF_ID, + .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], + .arg5_type = ARG_ANYTHING +}; + BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) { if (cpu >= nr_cpu_ids) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 06a9e220069e..a2024ba32a20 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1235,6 +1235,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_task_stack_proto; case BPF_FUNC_copy_from_user: return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; + case BPF_FUNC_copy_from_user_task: + return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; case BPF_FUNC_per_cpu_ptr: diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 16a7574292a5..4a2f7041ebae 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5076,6 +5076,16 @@ union bpf_attr { * associated to *xdp_md*, at *offset*. * Return * 0 on success, or a negative error in case of failure. + * + * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) + * Description + * Read *size* bytes from user space address *user_ptr* in *tsk*'s + * address space, and stores the data in *dst*. *flags* is not + * used yet and is provided for future extensibility. This helper + * can only be used by sleepable programs. + * Return + * 0 on success, or a negative error in case of failure. On error + * *dst* buffer is zeroed out. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5269,6 +5279,7 @@ union bpf_attr { FN(xdp_get_buff_len), \ FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ + FN(copy_from_user_task), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317 From f23653fe64479d96910bfda2b700b1af17c991ac Mon Sep 17 00:00:00 2001 From: "Maciej W. 
Rozycki" Date: Wed, 26 Jan 2022 09:22:54 +0000 Subject: tty: Partially revert the removal of the Cyclades public API Fix a user API regression introduced with commit f76edd8f7ce0 ("tty: cyclades, remove this orphan"), which removed a part of the API and caused compilation errors for user programs using said part, such as GCC 9 in its libsanitizer component[1]: .../libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc:160:10: fatal error: linux/cyclades.h: No such file or directory 160 | #include | ^~~~~~~~~~~~~~~~~~ compilation terminated. make[4]: *** [Makefile:664: sanitizer_platform_limits_posix.lo] Error 1 As the absolute minimum required bring `struct cyclades_monitor' and ioctl numbers back then so as to make the library build again. Add a preprocessor warning as to the obsolescence of the features provided. References: [1] GCC PR sanitizer/100379, "cyclades.h is removed from linux kernel header files", Fixes: f76edd8f7ce0 ("tty: cyclades, remove this orphan") Cc: stable@vger.kernel.org # v5.13+ Reviewed-by: Christoph Hellwig Signed-off-by: Maciej W. Rozycki Link: https://lore.kernel.org/r/alpine.DEB.2.20.2201260733430.11348@tpp.orcam.me.uk Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/cyclades.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 include/uapi/linux/cyclades.h (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h new file mode 100644 index 000000000000..6225c5aebe06 --- /dev/null +++ b/include/uapi/linux/cyclades.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _UAPI_LINUX_CYCLADES_H +#define _UAPI_LINUX_CYCLADES_H + +#warning "Support for features provided by this header has been removed" +#warning "Please consider updating your code" + +struct cyclades_monitor { + unsigned long int_count; + unsigned long char_count; + unsigned long char_max; + unsigned long char_last; +}; + +#define CYGETMON 0x435901 +#define CYGETTHRESH 0x435902 +#define CYSETTHRESH 0x435903 +#define CYGETDEFTHRESH 0x435904 +#define CYSETDEFTHRESH 0x435905 +#define CYGETTIMEOUT 0x435906 +#define CYSETTIMEOUT 0x435907 +#define CYGETDEFTIMEOUT 0x435908 +#define CYSETDEFTIMEOUT 0x435909 +#define CYSETRFLOW 0x43590a +#define CYGETRFLOW 0x43590b +#define CYSETRTSDTR_INV 0x43590c +#define CYGETRTSDTR_INV 0x43590d +#define CYZSETPOLLCYCLE 0x43590e +#define CYZGETPOLLCYCLE 0x43590f +#define CYGETCD1400VER 0x435910 +#define CYSETWAIT 0x435912 +#define CYGETWAIT 0x435913 + +#endif /* _UAPI_LINUX_CYCLADES_H */ -- cgit v1.2.3-71-gd317 From dd6e631220181162478984d2d46dd979e04d8e75 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 26 Jan 2022 07:49:45 -0500 Subject: KVM: x86: add system attribute to retrieve full set of supported xsave states Because KVM_GET_SUPPORTED_CPUID is meant to be passed (by simple-minded VMMs) to KVM_SET_CPUID2, it cannot include any dynamic xsave states that have not been enabled. Probing those, for example so that they can be passed to ARCH_REQ_XCOMP_GUEST_PERM, requires a new ioctl or arch_prctl. The latter is in fact worse, even though that is what the rest of the API uses, because it would require supported_xcr0 to be moved from the KVM module to the kernel just for this use. In addition, the value would be nonsensical (or an error would have to be returned) until the KVM module is loaded in. 
Therefore, to limit the growth of system ioctls, add a /dev/kvm variant of KVM_{GET,HAS}_DEVICE_ATTR, and implement it in x86 with just one group (0) and attribute (KVM_X86_XCOMP_GUEST_SUPP). Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 4 +++- arch/x86/include/uapi/asm/kvm.h | 3 +++ arch/x86/kvm/x86.c | 51 +++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 1 + 4 files changed, 58 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index bb8cfddbb22d..a4267104db50 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -3268,6 +3268,7 @@ number. :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, KVM_CAP_VCPU_ATTRIBUTES for vcpu device + KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device (no set) :Type: device ioctl, vm ioctl, vcpu ioctl :Parameters: struct kvm_device_attr :Returns: 0 on success, -1 on error @@ -3302,7 +3303,8 @@ transferred is defined by the particular attribute. ------------------------ :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, - KVM_CAP_VCPU_ATTRIBUTES for vcpu device + KVM_CAP_VCPU_ATTRIBUTES for vcpu device + KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device :Type: device ioctl, vm ioctl, vcpu ioctl :Parameters: struct kvm_device_attr :Returns: 0 on success, -1 on error diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 2da3316bb559..bf6e96011dfe 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -452,6 +452,9 @@ struct kvm_sync_regs { #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 +/* attributes for system fd (group 0) */ +#define KVM_X86_XCOMP_GUEST_SUPP 0 + struct kvm_vmx_nested_state_data { __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 76a5dcfcabdb..c25a6ef0ff06 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4230,6 +4230,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SREGS2: case KVM_CAP_EXIT_ON_EMULATION_FAILURE: case KVM_CAP_VCPU_ATTRIBUTES: + case KVM_CAP_SYS_ATTRIBUTES: r = 1; break; case KVM_CAP_EXIT_HYPERCALL: @@ -4343,6 +4344,40 @@ static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr) return uaddr; } +static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr) +{ + u64 __user *uaddr = kvm_get_attr_addr(attr); + + if (attr->group) + return -ENXIO; + + if (IS_ERR(uaddr)) + return PTR_ERR(uaddr); + + switch (attr->attr) { + case KVM_X86_XCOMP_GUEST_SUPP: + if (put_user(supported_xcr0, uaddr)) + return -EFAULT; + return 0; + default: + return -ENXIO; + break; + } +} + +static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr) +{ + if (attr->group) + return -ENXIO; + + switch (attr->attr) { + case KVM_X86_XCOMP_GUEST_SUPP: + return 0; + default: + return -ENXIO; + } +} + long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -4431,6 +4466,22 @@ long kvm_arch_dev_ioctl(struct file *filp, case KVM_GET_SUPPORTED_HV_CPUID: r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp); break; + case KVM_GET_DEVICE_ATTR: { + struct kvm_device_attr attr; + r = -EFAULT; + if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) + break; + r = kvm_x86_dev_get_attr(&attr); + break; + } + case KVM_HAS_DEVICE_ATTR: { + struct kvm_device_attr attr; + r = -EFAULT; + if (copy_from_user(&attr, 
(void __user *)arg, sizeof(attr))) + break; + r = kvm_x86_dev_has_attr(&attr); + break; + } default: r = -EINVAL; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 9563d294f181..b46bcdb0cab1 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1133,6 +1133,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 #define KVM_CAP_VM_GPA_BITS 207 #define KVM_CAP_XSAVE2 208 +#define KVM_CAP_SYS_ATTRIBUTES 209 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-71-gd317 From 9690ae60429020f38e4aa2540c306f27eb021bc0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 27 Jan 2022 10:42:59 -0800 Subject: ethtool: add header/data split indication For applications running on a mix of platforms it's useful to have a clear indication whether host's NIC supports the geometry requirements of TCP zero-copy. TCP zero-copy Rx requires data to be neatly placed into memory pages. Most NICs can't do that. This patch is adding GET support only, since the NICs I work with either always have the feature enabled or enable it whenever MTU is set to jumbo. In other words I don't need SET. But adding set should be trivial. (The only note on SET is that we will likely want the setting to be "sticky" and use 0 / `unknown` to reset it back to driver default.) Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- Documentation/networking/ethtool-netlink.rst | 8 ++++++++ include/linux/ethtool.h | 2 ++ include/uapi/linux/ethtool_netlink.h | 7 +++++++ net/ethtool/rings.c | 15 ++++++++++----- 4 files changed, 27 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 9d98e0511249..cae28af7a476 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -860,8 +860,16 @@ Kernel response contents: ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring + ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split ==================================== ====== =========================== +``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with +page-flipping TCP zero-copy receive (``getsockopt(TCP_ZEROCOPY_RECEIVE)``). +If enabled the device is configured to place frame headers and data into +separate buffers. The device configuration must make it possible to receive +full memory pages of data, for example because MTU is high enough or through +HW-GRO. + RINGS_SET ========= diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 11efc45de66a..e0853f48b75e 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -70,9 +70,11 @@ enum { /** * struct kernel_ethtool_ringparam - RX/TX ring configuration * @rx_buf_len: Current length of buffers on the rx ring. 
+ * @tcp_data_split: Scatter packet headers and data to separate buffers */ struct kernel_ethtool_ringparam { u32 rx_buf_len; + u8 tcp_data_split; }; /** diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index cca6e474a085..417d4280d7b5 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -318,6 +318,12 @@ enum { /* RINGS */ +enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED, + ETHTOOL_TCP_DATA_SPLIT_ENABLED, +}; + enum { ETHTOOL_A_RINGS_UNSPEC, ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */ @@ -330,6 +336,7 @@ enum { ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */ ETHTOOL_A_RINGS_TX, /* u32 */ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ + ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index c1d5f5e0fdc9..18a5035d3bee 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -53,7 +53,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ nla_total_size(sizeof(u32)) + /* _RINGS_TX */ - nla_total_size(sizeof(u32)); /* _RINGS_RX_BUF_LEN */ + nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ + nla_total_size(sizeof(u8)); /* _RINGS_TCP_DATA_SPLIT */ } static int rings_fill_reply(struct sk_buff *skb, @@ -61,9 +62,11 @@ static int rings_fill_reply(struct sk_buff *skb, const struct ethnl_reply_data *reply_base) { const struct rings_reply_data *data = RINGS_REPDATA(reply_base); - const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam; + const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam; const struct ethtool_ringparam *ringparam = &data->ringparam; + WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED); + if ((ringparam->rx_max_pending && (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX, ringparam->rx_max_pending) || @@ -84,9 +87,11 @@ static int rings_fill_reply(struct sk_buff *skb, ringparam->tx_max_pending) || nla_put_u32(skb, ETHTOOL_A_RINGS_TX, ringparam->tx_pending))) || - (kernel_ringparam->rx_buf_len && - (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, - kernel_ringparam->rx_buf_len)))) + (kr->rx_buf_len && + (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) || + (kr->tcp_data_split && + (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, + kr->tcp_data_split)))) return -EMSGSIZE; return 0; -- cgit v1.2.3-71-gd317 From b2a90f4fcb146d0e033203ab646f0fd22cfa947f Mon Sep 17 00:00:00 2001 From: Sean Young Date: Thu, 13 Jan 2022 11:20:22 +0100 Subject: media: lirc: remove unused lirc features These features have never been implemented by any lirc driver, including staging or out of tree drivers. The ioctls for these features were removed in commit d55f09abe24b ("[media] lirc.h: remove several unused ioctls"). So, we can safely remove them.
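For context, userspace discovers what a lirc device can do by testing bits in the LIRC_GET_FEATURES result; an illustrative sketch (device path assumed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/lirc.h>

    int main(void)
    {
            unsigned int features;
            int fd = open("/dev/lirc0", O_RDONLY);

            if (fd < 0 || ioctl(fd, LIRC_GET_FEATURES, &features))
                    return 1;
            /* The bits removed by this patch were never set by any driver,
             * so a test against them could never succeed. */
            printf("wideband receiver: %s\n",
                   features & LIRC_CAN_USE_WIDEBAND_RECEIVER ? "yes" : "no");
            return 0;
    }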
Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab --- .../userspace-api/media/rc/lirc-get-features.rst | 18 ------------------ include/uapi/linux/lirc.h | 4 ---- 2 files changed, 22 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/media/rc/lirc-get-features.rst b/Documentation/userspace-api/media/rc/lirc-get-features.rst index 4bf25860f932..545137620ead 100644 --- a/Documentation/userspace-api/media/rc/lirc-get-features.rst +++ b/Documentation/userspace-api/media/rc/lirc-get-features.rst @@ -102,12 +102,6 @@ LIRC features The driver supports setting the receive carrier frequency using :ref:`ioctl LIRC_SET_REC_CARRIER `. -.. _LIRC-CAN-SET-REC-DUTY-CYCLE-RANGE: - -``LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE`` - - Unused. Kept just to avoid breaking uAPI. - .. _LIRC-CAN-SET-REC-CARRIER-RANGE: ``LIRC_CAN_SET_REC_CARRIER_RANGE`` @@ -129,12 +123,6 @@ LIRC features The driver supports :ref:`ioctl LIRC_SET_REC_TIMEOUT `. -.. _LIRC-CAN-SET-REC-FILTER: - -``LIRC_CAN_SET_REC_FILTER`` - - Unused. Kept just to avoid breaking uAPI. - .. _LIRC-CAN-MEASURE-CARRIER: ``LIRC_CAN_MEASURE_CARRIER`` @@ -149,12 +137,6 @@ LIRC features The driver supports learning mode using :ref:`ioctl LIRC_SET_WIDEBAND_RECEIVER `. -.. _LIRC-CAN-NOTIFY-DECODE: - -``LIRC_CAN_NOTIFY_DECODE`` - - Unused. Kept just to avoid breaking uAPI. - .. _LIRC-CAN-SEND-RAW: ``LIRC_CAN_SEND_RAW`` diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index 9919f2062b14..a1f9c26ea537 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -72,11 +72,9 @@ #define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) #define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) -#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 #define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 #define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 #define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 -#define LIRC_CAN_SET_REC_FILTER 0x08000000 #define LIRC_CAN_MEASURE_CARRIER 0x02000000 #define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 @@ -84,8 +82,6 @@ #define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) #define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) -#define LIRC_CAN_NOTIFY_DECODE 0x01000000 - /*** IOCTL commands for lirc driver ***/ #define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) -- cgit v1.2.3-71-gd317 From 68a99f6a0ebfe9101ea79ba5af1c407a5ad4f629 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sat, 15 Jan 2022 11:19:11 +0100 Subject: media: lirc: report ir receiver overflow If the driver reports that the hardware had an overflow, report this to userspace. It would be nice to know when this happens, and not just get a long space. This change has been tested with lircd, ir-ctl, and ir-keytable. 
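A mode2 reader that understands the new sample type could look roughly like this (illustrative sketch; the LIRC_IS_OVERFLOW()/LIRC_OVERFLOW() macros are added by this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/lirc.h>

    int main(void)
    {
            unsigned int sample;
            int fd = open("/dev/lirc0", O_RDONLY);

            if (fd < 0)
                    return 1;
            while (read(fd, &sample, sizeof(sample)) == sizeof(sample)) {
                    if (LIRC_IS_OVERFLOW(sample))
                            /* samples were lost; reset decoder state */
                            puts("overflow");
                    else if (LIRC_IS_PULSE(sample) || LIRC_IS_SPACE(sample))
                            printf("%s %u\n",
                                   LIRC_IS_PULSE(sample) ? "pulse" : "space",
                                   LIRC_VALUE(sample));
            }
            return 0;
    }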
Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab --- Documentation/userspace-api/media/lirc.h.rst.exceptions | 3 +++ Documentation/userspace-api/media/rc/lirc-dev-intro.rst | 11 +++++++++-- drivers/media/rc/lirc_dev.c | 13 ++++++------- drivers/media/rc/rc-loopback.c | 6 +++++- include/uapi/linux/lirc.h | 11 +++++++---- 5 files changed, 30 insertions(+), 14 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/userspace-api/media/lirc.h.rst.exceptions b/Documentation/userspace-api/media/lirc.h.rst.exceptions index ec86e82d026d..5f31e967bc50 100644 --- a/Documentation/userspace-api/media/lirc.h.rst.exceptions +++ b/Documentation/userspace-api/media/lirc.h.rst.exceptions @@ -11,12 +11,14 @@ ignore define LIRC_SPACE ignore define LIRC_PULSE ignore define LIRC_FREQUENCY ignore define LIRC_TIMEOUT +ignore define LIRC_OVERFLOW ignore define LIRC_VALUE ignore define LIRC_MODE2 ignore define LIRC_IS_SPACE ignore define LIRC_IS_PULSE ignore define LIRC_IS_FREQUENCY ignore define LIRC_IS_TIMEOUT +ignore define LIRC_IS_OVERFLOW ignore define LIRC_MODE2SEND ignore define LIRC_SEND2MODE @@ -75,6 +77,7 @@ ignore define PULSE_MASK ignore define LIRC_MODE2_SPACE ignore define LIRC_MODE2_PULSE ignore define LIRC_MODE2_TIMEOUT +ignore define LIRC_MODE2_OVERFLOW ignore define LIRC_VALUE_MASK ignore define LIRC_MODE2_MASK diff --git a/Documentation/userspace-api/media/rc/lirc-dev-intro.rst b/Documentation/userspace-api/media/rc/lirc-dev-intro.rst index 9a5e5f0aae11..d899331b943f 100644 --- a/Documentation/userspace-api/media/rc/lirc-dev-intro.rst +++ b/Documentation/userspace-api/media/rc/lirc-dev-intro.rst @@ -103,11 +103,11 @@ on the following table. ``LIRC_MODE2_PULSE`` - Signifies the presence of IR in microseconds. + Signifies the presence of IR in microseconds, also known as *flash*. ``LIRC_MODE2_SPACE`` - Signifies absence of IR in microseconds. + Signifies absence of IR in microseconds, also known as *gap*. ``LIRC_MODE2_FREQUENCY`` @@ -121,6 +121,13 @@ on the following table. to no IR being detected, this packet will be sent, with the number of microseconds with no IR. + ``LIRC_MODE2_OVERFLOW`` + + Signifies that the IR receiver encountered an overflow, and some IR + is missing. The IR data after this should be correct again. The + actual value is not important, but this is set to 0xffffff by the + kernel for compatibility with lircd. + .. _lirc-mode-pulse: ``LIRC_MODE_PULSE`` diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index fa4671fc92be..765375bda0c6 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -44,14 +44,13 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev) /* Receiver overflow, data missing */ if (ev.overflow) { /* - * Userspace expects a long space event before the start of - * the signal to use as a sync. This may be done with repeat - * packets and normal samples. But if an overflow has been sent - * then we assume that a long time has passed, so we send a - * space with the maximum time value. + * Send lirc overflow message. This message is unknown to + * lircd, but it will interpret this as a long space as + * long as the value is set to a high value. This resets its + * decoder state.
*/ - sample = LIRC_SPACE(LIRC_VALUE_MASK); - dev_dbg(&dev->dev, "delivering overflow space to lirc_dev\n"); + sample = LIRC_OVERFLOW(LIRC_VALUE_MASK); + dev_dbg(&dev->dev, "delivering overflow to lirc_dev\n"); /* Carrier reports */ } else if (ev.carrier_report) { diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c index 6441879fcba1..b356041c5c00 100644 --- a/drivers/media/rc/rc-loopback.c +++ b/drivers/media/rc/rc-loopback.c @@ -112,7 +112,11 @@ static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count) rawir.pulse = i % 2 ? false : true; rawir.duration = txbuf[i]; - ir_raw_event_store_with_filter(dev, &rawir); + /* simulate overflow if ridiculously long pulse was sent */ + if (rawir.pulse && rawir.duration > MS_TO_US(50)) + ir_raw_event_overflow(dev); + else + ir_raw_event_store_with_filter(dev, &rawir); } if (lodev->carrierreport) { diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index a1f9c26ea537..21c69a6a100d 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -16,14 +16,16 @@ #define LIRC_MODE2_PULSE 0x01000000 #define LIRC_MODE2_FREQUENCY 0x02000000 #define LIRC_MODE2_TIMEOUT 0x03000000 +#define LIRC_MODE2_OVERFLOW 0x04000000 #define LIRC_VALUE_MASK 0x00FFFFFF #define LIRC_MODE2_MASK 0xFF000000 -#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) -#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) -#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) -#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) +#define LIRC_SPACE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) +#define LIRC_PULSE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) +#define LIRC_FREQUENCY(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) +#define LIRC_TIMEOUT(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) +#define LIRC_OVERFLOW(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_OVERFLOW) #define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) #define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) @@ -32,6 +34,7 @@ #define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) #define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) #define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) +#define LIRC_IS_OVERFLOW(val) (LIRC_MODE2(val) == LIRC_MODE2_OVERFLOW) /* used heavily by lirc userspace */ #define lirc_t int -- cgit v1.2.3-71-gd317 From f6c6804c43fa18d3cee64b55490dfbd3bef1363a Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Fri, 28 Jan 2022 15:40:25 +0000 Subject: kvm: Move KVM_GET_XSAVE2 IOCTL definition at the end of kvm.h This way we can more easily find the next free IOCTL number when adding new IOCTLs. 
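For reference, consumers of the relocated ioctl size their buffer from the capability check rather than sizeof(struct kvm_xsave); an illustrative sketch, assuming (per the KVM API documentation) that KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) returns the required size in bytes:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static struct kvm_xsave *vcpu_get_xsave2(int kvm_fd, int vcpu_fd)
    {
            int size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
            struct kvm_xsave *buf;

            if (size < (int)sizeof(struct kvm_xsave))
                    size = sizeof(struct kvm_xsave);    /* cap not present */
            buf = calloc(1, size);
            if (buf && ioctl(vcpu_fd, KVM_GET_XSAVE2, buf) < 0) {
                    free(buf);
                    buf = NULL;
            }
            return buf;
    }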
Fixes: be50b2065dfa ("kvm: x86: Add support for getting/setting expanded xstate buffer") Signed-off-by: Janosch Frank Message-Id: <20220128154025.102666-1-frankja@linux.ibm.com> Signed-off-by: Paolo Bonzini --- include/uapi/linux/kvm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index b46bcdb0cab1..5191b57e1562 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1624,9 +1624,6 @@ struct kvm_enc_region { #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) #define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4) -/* Available with KVM_CAP_XSAVE2 */ -#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) - struct kvm_s390_pv_sec_parm { __u64 origin; __u64 length; @@ -2048,4 +2045,7 @@ struct kvm_stats_desc { #define KVM_GET_STATS_FD _IO(KVMIO, 0xce) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) + #endif /* __LINUX_KVM_H */ -- cgit v1.2.3-71-gd317 From e187013abeb4c2a7ec8a4bb978844c7e92a1a6ec Mon Sep 17 00:00:00 2001 From: Akhmat Karakotov Date: Mon, 31 Jan 2022 16:31:21 +0300 Subject: txhash: Make rethinking txhash behavior configurable via sysctl Add a per ns sysctl that controls the txhash rethink behavior: net.core.txrehash. When enabled, the same behavior is retained, when disabled, rethink is not performed. Sysctl is enabled by default. Signed-off-by: Akhmat Karakotov Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/netns/core.h | 1 + include/net/sock.h | 34 +++++++++++++++++++++------------- include/uapi/linux/socket.h | 3 +++ net/core/net_namespace.c | 2 ++ net/core/sysctl_net_core.c | 14 ++++++++++++-- 5 files changed, 39 insertions(+), 15 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 552bc25b1933..388244e315e7 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -10,6 +10,7 @@ struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; + u8 sysctl_txrehash; #ifdef CONFIG_PROC_FS struct prot_inuse __percpu *prot_inuse; diff --git a/include/net/sock.h b/include/net/sock.h index ff9b508d9c5f..0540e1b2aeb0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -587,6 +587,18 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk) __tmp | SK_USER_DATA_NOCOPY); \ }) +static inline +struct net *sock_net(const struct sock *sk) +{ + return read_pnet(&sk->sk_net); +} + +static inline +void sock_net_set(struct sock *sk, struct net *net) +{ + write_pnet(&sk->sk_net, net); +} + /* * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK * or not whether his port will be reused by someone else. 
SK_FORCE_REUSE @@ -2054,10 +2066,18 @@ static inline void sk_set_txhash(struct sock *sk) static inline bool sk_rethink_txhash(struct sock *sk) { - if (sk->sk_txhash) { + u8 rehash; + + if (!sk->sk_txhash) + return false; + + rehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); + + if (rehash) { sk_set_txhash(sk); return true; } + return false; } @@ -2704,18 +2724,6 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); } -static inline -struct net *sock_net(const struct sock *sk) -{ - return read_pnet(&sk->sk_net); -} - -static inline -void sock_net_set(struct sock *sk, struct net *net) -{ - write_pnet(&sk->sk_net, net); -} - static inline bool skb_sk_is_prefetched(struct sk_buff *skb) { diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h index eb0a9a5b6e71..0accd6102ece 100644 --- a/include/uapi/linux/socket.h +++ b/include/uapi/linux/socket.h @@ -31,4 +31,7 @@ struct __kernel_sockaddr_storage { #define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK) +#define SOCK_TXREHASH_DISABLED 0 +#define SOCK_TXREHASH_ENABLED 1 + #endif /* _UAPI_LINUX_SOCKET_H */ diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index c53d9aab38ab..8711350085d6 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -364,6 +364,8 @@ out_undo: static int __net_init net_defaults_init_net(struct net *net) { net->core.sysctl_somaxconn = SOMAXCONN; + net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED; + return 0; } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 7b4d485aac7a..dbeb8ecbcd98 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -593,6 +593,15 @@ static struct ctl_table netns_core_table[] = { .extra1 = SYSCTL_ZERO, .proc_handler = proc_dointvec_minmax }, + { + .procname = "txrehash", + .data = &init_net.core.sysctl_txrehash, + .maxlen = sizeof(u8), + .mode = 0644, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + .proc_handler = proc_dou8vec_minmax, + }, { } }; @@ -611,7 +620,7 @@ __setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup); static __net_init int sysctl_core_net_init(struct net *net) { - struct ctl_table *tbl; + struct ctl_table *tbl, *tmp; tbl = netns_core_table; if (!net_eq(net, &init_net)) { @@ -619,7 +628,8 @@ static __net_init int sysctl_core_net_init(struct net *net) if (tbl == NULL) goto err_dup; - tbl[0].data = &net->core.sysctl_somaxconn; + for (tmp = tbl; tmp->procname; tmp++) + tmp->data += (char *)net - (char *)&init_net; /* Don't export any sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) { -- cgit v1.2.3-71-gd317 From 26859240e4ee701e0379f08634957adaff67e43a Mon Sep 17 00:00:00 2001 From: Akhmat Karakotov Date: Mon, 31 Jan 2022 16:31:22 +0300 Subject: txhash: Add socket option to control TX hash rethink behavior Add the SO_TXREHASH socket option to control hash rethink behavior per socket. When default mode is set, sockets disable rehash at initialization and use sysctl option when entering listen state. setsockopt() overrides default behavior. Signed-off-by: Akhmat Karakotov Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- arch/alpha/include/uapi/asm/socket.h | 2 ++ arch/mips/include/uapi/asm/socket.h | 2 ++ arch/parisc/include/uapi/asm/socket.h | 2 ++ arch/sparc/include/uapi/asm/socket.h | 2 ++ include/net/sock.h | 12 +++--------- include/uapi/asm-generic/socket.h | 2 ++ include/uapi/linux/socket.h | 1 + net/core/sock.c | 13 +++++++++++++ net/ipv4/inet_connection_sock.c | 3 +++ 9 files changed, 30 insertions(+), 9 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 284d28755b8d..7d81535893af 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -133,6 +133,8 @@ #define SO_RESERVE_MEM 73 +#define SO_TXREHASH 74 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 24e0efb360f6..1d55e57b8466 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -144,6 +144,8 @@ #define SO_RESERVE_MEM 73 +#define SO_TXREHASH 74 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 845ddc63c882..654061e0964e 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -125,6 +125,8 @@ #define SO_RESERVE_MEM 0x4047 +#define SO_TXREHASH 0x4048 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 2672dd03faf3..666f81e617ea 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -126,6 +126,8 @@ #define SO_RESERVE_MEM 0x0052 +#define SO_TXREHASH 0x0053 + #if !defined(__KERNEL__) diff --git a/include/net/sock.h b/include/net/sock.h index 0540e1b2aeb0..d6c13f0fba40 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -316,6 +316,7 @@ struct sk_filter; * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_txhash: computed flow hash for use on transmit + * @sk_txrehash: enable TX hash rethink * @sk_filter: socket filtering instructions * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received @@ -491,6 +492,7 @@ struct sock { u32 sk_ack_backlog; u32 sk_max_ack_backlog; kuid_t sk_uid; + u8 sk_txrehash; #ifdef CONFIG_NET_RX_BUSY_POLL u8 sk_prefer_busy_poll; u16 sk_busy_poll_budget; @@ -2066,18 +2068,10 @@ static inline void sk_set_txhash(struct sock *sk) static inline bool sk_rethink_txhash(struct sock *sk) { - u8 rehash; - - if (!sk->sk_txhash) - return false; - - rehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); - - if (rehash) { + if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) { sk_set_txhash(sk); return true; } - return false; } diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index c77a1313b3b0..467ca2f28760 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -128,6 +128,8 @@ #define SO_RESERVE_MEM 73 +#define SO_TXREHASH 74 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h index 0accd6102ece..51d6bb2f6765 100644 --- a/include/uapi/linux/socket.h +++ b/include/uapi/linux/socket.h @@ -31,6 +31,7 @@ struct __kernel_sockaddr_storage { #define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK) +#define SOCK_TXREHASH_DEFAULT ((u8)-1) 
#define SOCK_TXREHASH_DISABLED 0 #define SOCK_TXREHASH_ENABLED 1 diff --git a/net/core/sock.c b/net/core/sock.c index cccf21f3618d..5e711b42898f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1447,6 +1447,14 @@ set_sndbuf: break; } + case SO_TXREHASH: + if (val < -1 || val > 1) { + ret = -EINVAL; + break; + } + sk->sk_txrehash = (u8)val; + break; + default: ret = -ENOPROTOOPT; break; @@ -1834,6 +1842,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_reserved_mem; break; + case SO_TXREHASH: + v.val = sk->sk_txrehash; + break; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). @@ -3279,6 +3291,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_pacing_rate = ~0UL; WRITE_ONCE(sk->sk_pacing_shift, 10); sk->sk_incoming_cpu = -1; + sk->sk_txrehash = SOCK_TXREHASH_DEFAULT; sk_rx_queue_clear(sk); /* diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index fc2a985f6064..b81fb13fc5f4 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1046,6 +1046,9 @@ int inet_csk_listen_start(struct sock *sk) sk->sk_ack_backlog = 0; inet_csk_delack_init(sk); + if (sk->sk_txrehash == SOCK_TXREHASH_DEFAULT) + sk->sk_txrehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); + /* There is race window here: we announce ourselves listening, * but this transition is still not validated by get_port(). * It is OK, because this socket enters to hash table only -- cgit v1.2.3-71-gd317 From 4421a582718ab81608d8486734c18083b822390d Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Sun, 30 Jan 2022 12:55:17 +0100 Subject: bpf: Make dst_port field in struct bpf_sock 16-bit wide Menglong Dong reports that the documentation for the dst_port field in struct bpf_sock is inaccurate and confusing. From the BPF program PoV, the field is a zero-padded 16-bit integer in network byte order. The value appears to the BPF user as if laid out in memory as so (bit offsets from offsetof(struct bpf_sock, dst_port)): +0 <port MSB>, +8 <port LSB>, +16 0x00, +24 0x00. 32-, 16-, and 8-bit wide loads from the field are all allowed, but only if the offset into the field is 0. 32-bit wide loads from dst_port are especially confusing. The loaded value, after converting to host byte order with bpf_ntohl(dst_port), contains the port number in the upper 16 bits. Remove the confusion by splitting the field into two 16-bit fields. For backward compatibility, allow 32-bit wide loads from offsetof(struct bpf_sock, dst_port). While at it, allow 8-bit loads at offset [0] and [1] from dst_port.
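As a minimal sketch of the resulting access pattern (not part of the patch; the variable names and program skeleton are hypothetical, and bpf_ntohs()/bpf_ntohl() come from bpf_endian.h):

    /* 'sk' is a struct bpf_sock * available in the program context */
    __u16 dport = bpf_ntohs(sk->dst_port);        /* natural 16-bit load */

    /* a legacy 32-bit load at the same offset still verifies for backward
     * compatibility; after bpf_ntohl() the port sits in the upper 16 bits */
    __u32 raw = *(__u32 *)&sk->dst_port;
    __u16 dport_compat = (__u16)(bpf_ntohl(raw) >> 16);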
Reported-by: Menglong Dong Signed-off-by: Jakub Sitnicki Link: https://lore.kernel.org/r/20220130115518.213259-2-jakub@cloudflare.com Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 3 ++- net/core/filter.c | 10 +++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4a2f7041ebae..a7f0ddedac1f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5574,7 +5574,8 @@ struct bpf_sock { __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ - __u32 dst_port; /* network byte order */ + __be16 dst_port; /* network byte order */ + __u16 :16; /* zero padding */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; diff --git a/net/core/filter.c b/net/core/filter.c index a06931c27eeb..99a05199a806 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8265,6 +8265,7 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); + int field_size; if (off < 0 || off >= sizeof(struct bpf_sock)) return false; @@ -8276,7 +8277,6 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, case offsetof(struct bpf_sock, family): case offsetof(struct bpf_sock, type): case offsetof(struct bpf_sock, protocol): - case offsetof(struct bpf_sock, dst_port): case offsetof(struct bpf_sock, src_port): case offsetof(struct bpf_sock, rx_queue_mapping): case bpf_ctx_range(struct bpf_sock, src_ip4): @@ -8285,6 +8285,14 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); + case bpf_ctx_range(struct bpf_sock, dst_port): + field_size = size == size_default ? + size_default : sizeof_field(struct bpf_sock, dst_port); + bpf_ctx_record_field_size(info, field_size); + return bpf_ctx_narrow_access_ok(off, size, field_size); + case offsetofend(struct bpf_sock, dst_port) ... + offsetof(struct bpf_sock, dst_ip4) - 1: + return false; } return size == size_default; -- cgit v1.2.3-71-gd317 From bfdf4e6208051ed7165b2e92035b4bf11f43eb63 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Thu, 27 Jan 2022 10:27:20 -0500 Subject: rseq: Remove broken uapi field layout on 32-bit little endian The rseq rseq_cs.ptr.{ptr32,padding} uapi endianness handling is entirely wrong on 32-bit little endian: a preprocessor logic mistake wrongly uses the big endian field layout on 32-bit little endian architectures. Fortunately, those ptr32 accessors were never used within the kernel, and only meant as a convenience for user-space. Remove those and replace the whole rseq_cs union by a __u64 type, as this is the only thing really needed to express the ABI. Document how 32-bit architectures are meant to interact with this field. 
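A minimal sketch of the documented contract (illustrative names; a real user such as librseq must also honor the single-copy-atomicity requirement on updates):

    #include <stdint.h>

    /* 'rs' is this thread's registered struct rseq, 'cs' a struct rseq_cs */
    rs->rseq_cs = (uint64_t)(uintptr_t)&cs;

    /* on a 32-bit architecture this assignment zero-extends the pointer:
     * the low order bits carry the address and the high order bits stay
     * initialized to 0, matching the new comment in the header */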
Fixes: ec9c82e03a74 ("rseq: uapi: Declare rseq_cs field as union, update includes") Signed-off-by: Mathieu Desnoyers Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20220127152720.25898-1-mathieu.desnoyers@efficios.com --- include/uapi/linux/rseq.h | 20 ++++---------------- kernel/rseq.c | 8 ++++---- 2 files changed, 8 insertions(+), 20 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h index 9a402fdb60e9..77ee207623a9 100644 --- a/include/uapi/linux/rseq.h +++ b/include/uapi/linux/rseq.h @@ -105,23 +105,11 @@ struct rseq { * Read and set by the kernel. Set by user-space with single-copy * atomicity semantics. This field should only be updated by the * thread which registered this data structure. Aligned on 64-bit. + * + * 32-bit architectures should update the low order bits of the + * rseq_cs field, leaving the high order bits initialized to 0. */ - union { - __u64 ptr64; -#ifdef __LP64__ - __u64 ptr; -#else - struct { -#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN) - __u32 padding; /* Initialized to zero. */ - __u32 ptr32; -#else /* LITTLE */ - __u32 ptr32; - __u32 padding; /* Initialized to zero. */ -#endif /* ENDIAN */ - } ptr; -#endif - } rseq_cs; + __u64 rseq_cs; /* * Restartable sequences flags field. diff --git a/kernel/rseq.c b/kernel/rseq.c index 6d45ac3dae7f..97ac20b4f738 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -128,10 +128,10 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) int ret; #ifdef CONFIG_64BIT - if (get_user(ptr, &t->rseq->rseq_cs.ptr64)) + if (get_user(ptr, &t->rseq->rseq_cs)) return -EFAULT; #else - if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr))) + if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr))) return -EFAULT; #endif if (!ptr) { @@ -217,9 +217,9 @@ static int clear_rseq_cs(struct task_struct *t) * Set rseq_cs to NULL. */ #ifdef CONFIG_64BIT - return put_user(0UL, &t->rseq->rseq_cs.ptr64); + return put_user(0UL, &t->rseq->rseq_cs); #else - if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64))) + if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs))) return -EFAULT; return 0; #endif -- cgit v1.2.3-71-gd317 From ddecd22878601a606d160680fa85802b75d92eb6 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 31 Jan 2022 11:34:07 +0100 Subject: perf: uapi: Document perf_event_attr::sig_data truncation on 32 bit architectures Due to the alignment requirements of siginfo_t, as described in 3ddb3fd8cdb0 ("signal, perf: Fix siginfo_t by avoiding u64 on 32-bit architectures"), siginfo_t::si_perf_data is limited to an unsigned long. However, perf_event_attr::sig_data is an u64, to avoid having to deal with compat conversions. Due to being an u64, it may not immediately be clear to users that sig_data is truncated on 32 bit architectures. Add a comment to explicitly point this out, and hopefully help some users save time by not having to deduce themselves what's happening. 
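A sketch of the user-visible effect (the handler skeleton is assumed, not from the patch; si_perf_data is the uapi siginfo field referenced above):

    static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
    {
        /* si_perf_data is 'unsigned long', so on a 32-bit architecture
         * only the low 32 bits of the u64 attr.sig_data arrive here */
        unsigned long data = info->si_perf_data;
        (void)data;
    }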
Reported-by: Dmitry Vyukov Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dmitry Vyukov Link: https://lore.kernel.org/r/20220131103407.1971678-3-elver@google.com --- include/uapi/linux/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 1b65042ab1db..82858b697c05 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -465,6 +465,8 @@ struct perf_event_attr { /* * User provided data if sigtrap=1, passed back to user via * siginfo_t::si_perf_data, e.g. to permit user to identify the event. + * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be + * truncated accordingly on 32 bit architectures. */ __u64 sig_data; }; -- cgit v1.2.3-71-gd317 From c86d86131ab75696fc52d98571148842e067d620 Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Wed, 2 Feb 2022 06:09:04 +0300 Subject: Partially revert "net/smc: Add netlink net namespace support" The change of sizeof(struct smc_diag_linkinfo) by commit 79d39fc503b4 ("net/smc: Add netlink net namespace support") introduced an ABI regression: since struct smc_diag_lgrinfo contains an object of type "struct smc_diag_linkinfo", the offset of all subsequent members of struct smc_diag_lgrinfo changed. As a result, applications compiled with the old version of struct smc_diag_linkinfo will receive garbage in struct smc_diag_lgrinfo.role if the kernel implements this new version of struct smc_diag_linkinfo. Fix this regression by reverting the part of commit 79d39fc503b4 that changes struct smc_diag_linkinfo. After all, there is the SMC_GEN_NETLINK interface which is good enough, so there is probably no need to touch the smc_diag ABI in the first place. Fixes: 79d39fc503b4 ("net/smc: Add netlink net namespace support") Signed-off-by: Dmitry V.
Levin Reviewed-by: Karsten Graul Link: https://lore.kernel.org/r/20220202030904.GA9742@altlinux.org Signed-off-by: Jakub Kicinski --- include/uapi/linux/smc_diag.h | 11 +++++------ net/smc/smc_diag.c | 2 -- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index c7008d87f1a4..8cb3a6fef553 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -84,12 +84,11 @@ struct smc_diag_conninfo { /* SMC_DIAG_LINKINFO */ struct smc_diag_linkinfo { - __u8 link_id; /* link identifier */ - __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */ - __u8 ibport; /* RDMA device port number */ - __u8 gid[40]; /* local GID */ - __u8 peer_gid[40]; /* peer GID */ - __aligned_u64 net_cookie; /* RDMA device net namespace */ + __u8 link_id; /* link identifier */ + __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */ + __u8 ibport; /* RDMA device port number */ + __u8 gid[40]; /* local GID */ + __u8 peer_gid[40]; /* peer GID */ }; struct smc_diag_lgrinfo { diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index b8898c787d23..1fca2f90a9c7 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -146,13 +146,11 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_link *link = smc->conn.lnk; - struct net *net = read_pnet(&link->smcibdev->ibdev->coredev.rdma_net); struct smc_diag_lgrinfo linfo = { .role = smc->conn.lgr->role, .lnk[0].ibport = link->ibport, .lnk[0].link_id = link->link_id, - .lnk[0].net_cookie = net->net_cookie, }; memcpy(linfo.lnk[0].ibname, -- cgit v1.2.3-71-gd317 From e4b1eb24ce5a696ef7229f9926ff34d7502f0582 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Thu, 27 Jan 2022 11:34:53 -0800 Subject: thermal: netlink: Add a new event to notify CPU capabilities change Add a new netlink event to notify change in CPU capabilities in terms of performance and efficiency. Firmware may change CPU capabilities as a result of thermal events in the system or to account for changes in the TDP (thermal design power) level. This notification type will allow user space to avoid running workloads on certain CPUs or proactively adjust power limits to avoid future events. The netlink message consists of a nested attribute (THERMAL_GENL_ATTR_CPU_CAPABILITY) with three attributes: * THERMAL_GENL_ATTR_CPU_CAPABILITY_ID (type u32): -- logical CPU number * THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE (type u32): -- Scaled performance from 0-1023 * THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY (type u32): -- Scaled efficiency from 0-1023 Reviewed-by: Len Brown Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. 
Wysocki --- drivers/thermal/thermal_netlink.c | 53 +++++++++++++++++++++++++++++++++++++++ drivers/thermal/thermal_netlink.h | 14 +++++++++++ include/uapi/linux/thermal.h | 6 ++++- 3 files changed, 72 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index a16dd4d5d710..7c97a091680e 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -43,6 +43,11 @@ static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = [THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 }, [THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING, .len = THERMAL_NAME_LENGTH }, + /* CPU capabilities */ + [THERMAL_GENL_ATTR_CPU_CAPABILITY] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_ID] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY] = { .type = NLA_U32 }, }; struct param { @@ -58,6 +63,8 @@ struct param { int temp; int cdev_state; int cdev_max_state; + struct thermal_genl_cpu_caps *cpu_capabilities; + int cpu_capabilities_count; }; typedef int (*cb_t)(struct param *); @@ -190,6 +197,42 @@ static int thermal_genl_event_gov_change(struct param *p) return 0; } +static int thermal_genl_event_cpu_capability_change(struct param *p) +{ + struct thermal_genl_cpu_caps *cpu_cap = p->cpu_capabilities; + struct sk_buff *msg = p->msg; + struct nlattr *start_cap; + int i; + + start_cap = nla_nest_start(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY); + if (!start_cap) + return -EMSGSIZE; + + for (i = 0; i < p->cpu_capabilities_count; ++i) { + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID, + cpu_cap->cpu)) + goto out_cancel_nest; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE, + cpu_cap->performance)) + goto out_cancel_nest; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY, + cpu_cap->efficiency)) + goto out_cancel_nest; + + ++cpu_cap; + } + + nla_nest_end(msg, start_cap); + + return 0; +out_cancel_nest: + nla_nest_cancel(msg, start_cap); + + return -EMSGSIZE; +} + int thermal_genl_event_tz_delete(struct param *p) __attribute__((alias("thermal_genl_event_tz"))); @@ -219,6 +262,7 @@ static cb_t event_cb[] = { [THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete, [THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update, [THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change, + [THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change, }; /* @@ -356,6 +400,15 @@ int thermal_notify_tz_gov_change(int tz_id, const char *name) return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p); } +int thermal_genl_cpu_capability_event(int count, + struct thermal_genl_cpu_caps *caps) +{ + struct param p = { .cpu_capabilities_count = count, .cpu_capabilities = caps }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, &p); +} +EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event); + /*************************** Command encoding ********************************/ static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz, diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h index e554f76291f4..04d1adbbc012 100644 --- a/drivers/thermal/thermal_netlink.h +++ b/drivers/thermal/thermal_netlink.h @@ -4,6 +4,12 @@ * Author: Daniel Lezcano */ +struct thermal_genl_cpu_caps { + int cpu; + int performance; + 
int efficiency; +}; + /* Netlink notification function */ #ifdef CONFIG_THERMAL_NETLINK int __init thermal_netlink_init(void); @@ -23,6 +29,8 @@ int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state); int thermal_notify_cdev_delete(int cdev_id); int thermal_notify_tz_gov_change(int tz_id, const char *name); int thermal_genl_sampling_temp(int id, int temp); +int thermal_genl_cpu_capability_event(int count, + struct thermal_genl_cpu_caps *caps); #else static inline int thermal_netlink_init(void) { @@ -101,4 +109,10 @@ static inline int thermal_genl_sampling_temp(int id, int temp) { return 0; } + +static inline int thermal_genl_cpu_capability_event(int count, struct cpu_capability *caps) +{ + return 0; +} + #endif /* CONFIG_THERMAL_NETLINK */ diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index 9aa2fedfa309..fc78bf3aead7 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -44,7 +44,10 @@ enum thermal_genl_attr { THERMAL_GENL_ATTR_CDEV_MAX_STATE, THERMAL_GENL_ATTR_CDEV_NAME, THERMAL_GENL_ATTR_GOV_NAME, - + THERMAL_GENL_ATTR_CPU_CAPABILITY, + THERMAL_GENL_ATTR_CPU_CAPABILITY_ID, + THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE, + THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY, __THERMAL_GENL_ATTR_MAX, }; #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) @@ -71,6 +74,7 @@ enum thermal_genl_event { THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */ THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */ THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */ + THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */ __THERMAL_GENL_EVENT_MAX, }; #define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1) -- cgit v1.2.3-71-gd317 From d1ca60efc53d665cf89ed847a14a510a81770b81 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 2 Feb 2022 12:00:56 +0100 Subject: netfilter: ctnetlink: disable helper autoassign When userspace, e.g. conntrackd, inserts an entry with a specified helper, it's possible that the helper is lost immediately after it's added: ctnetlink_create_conntrack -> nf_ct_helper_ext_add + assign helper -> ctnetlink_setup_nat -> ctnetlink_parse_nat_setup -> parse_nat_setup -> nfnetlink_parse_nat_setup -> nf_nat_setup_info -> nf_conntrack_alter_reply -> __nf_ct_try_assign_helper ... and __nf_ct_try_assign_helper will zero the helper again. Set the IPS_HELPER bit to bypass the auto-assign logic; it's unwanted here, just like when a helper is assigned via the ruleset. Dropped the old 'not strictly necessary' comment; it referred to use of rcu_assign_pointer() before it got replaced by RCU_INIT_POINTER(). NB: the Fixes tag is intentionally incorrect; this extends the referenced commit, but this change won't build without IPS_HELPER introduced there.
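The mechanism relies on an early exit in the auto-assignment path; a simplified sketch (not the full function):

    /* in __nf_ct_try_assign_helper(), roughly: */
    if (test_bit(IPS_HELPER_BIT, &ct->status))
        return 0;   /* helper was attached explicitly; leave it alone */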
Fixes: 6714cf5465d280 ("netfilter: nf_conntrack: fix explicit helper attachment and NAT") Reported-by: Pham Thanh Tuyen Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_conntrack_common.h | 2 +- net/netfilter/nf_conntrack_netlink.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 4b3395082d15..26071021e986 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -106,7 +106,7 @@ enum ip_conntrack_status { IPS_NAT_CLASH = IPS_UNTRACKED, #endif - /* Conntrack got a helper explicitly attached via CT target. */ + /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */ IPS_HELPER_BIT = 13, IPS_HELPER = (1 << IPS_HELPER_BIT), diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index ac438370f94a..7032402ffd33 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2311,7 +2311,8 @@ ctnetlink_create_conntrack(struct net *net, if (helper->from_nlattr) helper->from_nlattr(helpinfo, ct); - /* not in hash table yet so not strictly necessary */ + /* disable helper auto-assignment for this entry */ + ct->status |= IPS_HELPER; RCU_INIT_POINTER(help->helper, helper); } } else { -- cgit v1.2.3-71-gd317 From 8b5413647262dda8d8d0e07e14ea1de9ac7cf0b2 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Mon, 17 Jan 2022 21:56:13 +0100 Subject: netfilter: nfqueue: enable to get skb->priority This info could be useful to improve traffic analysis. Signed-off-by: Nicolas Dichtel Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nfnetlink_queue.h | 1 + net/netfilter/nfnetlink_queue.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index aed90c4df0c8..ef7c97f21a15 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -61,6 +61,7 @@ enum nfqnl_attr_type { NFQA_SECCTX, /* security context string */ NFQA_VLAN, /* nested attribute: packet vlan info */ NFQA_L2HDR, /* full L2 header */ + NFQA_PRIORITY, /* skb->priority */ __NFQA_MAX }; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index ea2d9c2a44cf..48d7a59c6482 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -402,6 +402,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif + nla_total_size(sizeof(u_int32_t)) /* mark */ + + nla_total_size(sizeof(u_int32_t)) /* priority */ + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) + nla_total_size(sizeof(u_int32_t)) /* skbinfo */ + nla_total_size(sizeof(u_int32_t)); /* cap_len */ @@ -559,6 +560,10 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark))) goto nla_put_failure; + if (entskb->priority && + nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority))) + goto nla_put_failure; + if (indev && entskb->dev && skb_mac_header_was_set(entskb) && skb_mac_header_len(entskb) != 0) { -- cgit v1.2.3-71-gd317 From be847673cfffce8bb6e9ed6ae186081280c58831 Mon Sep 17 00:00:00 2001 From: Justin Iurman Date: 
Wed, 2 Feb 2022 15:25:53 +0100 Subject: uapi: ioam: Insertion frequency Add the insertion frequency uapi for IOAM lwtunnels. Signed-off-by: Justin Iurman Signed-off-by: Jakub Kicinski --- include/uapi/linux/ioam6_iptunnel.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h index 829ffdfcacca..38f6a8fdfd34 100644 --- a/include/uapi/linux/ioam6_iptunnel.h +++ b/include/uapi/linux/ioam6_iptunnel.h @@ -41,6 +41,15 @@ enum { /* IOAM Trace Header */ IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */ + /* Insertion frequency: + * "k over n" packets (0 < k <= n) + * [0.0001% ... 100%] + */ +#define IOAM6_IPTUNNEL_FREQ_MIN 1 +#define IOAM6_IPTUNNEL_FREQ_MAX 1000000 + IOAM6_IPTUNNEL_FREQ_K, /* u32 */ + IOAM6_IPTUNNEL_FREQ_N, /* u32 */ + __IOAM6_IPTUNNEL_MAX, }; -- cgit v1.2.3-71-gd317 From 3698807094ecae945436921325f5c309d1123f11 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Tue, 24 Aug 2021 16:13:41 -0400 Subject: drm/amdkfd: CRIU Introduce Checkpoint-Restore APIs Checkpoint-Restore in userspace (CRIU) is a powerful tool that can snapshot a running process and later restore it on the same or a remote machine, but it expects that processes with a device file (e.g. GPU) associated with them provide the necessary driver support to assist CRIU and its extensible plugin interface. Thus, in order to support checkpoint-restore of any ROCm process, the AMD Radeon Open Compute kernel driver needs to provide a set of new APIs that provide the necessary VRAM metadata and its contents to a userspace component (the CRIU plugin), which can store it in the form of image files. This introduces some new ioctls which will be used to checkpoint-restore any KFD-bound user process. KFD only allows ioctl calls from the same process that opened the KFD file descriptor. Since these ioctls are expected to be called from a KFD CRIU plugin, which has elevated ptrace privileges and CAP_CHECKPOINT_RESTORE capabilities attached to its file descriptors, modify KFD to allow such calls.
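For orientation, a hypothetical user-space sequence (not from the patch; 'kfd_fd' and 'target_pid' are illustrative names) would start a checkpoint like so:

    struct kfd_ioctl_criu_args args = { 0 };

    args.op  = KFD_CRIU_OP_PROCESS_INFO;
    args.pid = target_pid;                      /* process to checkpoint */
    if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args) < 0)
        return -1;                              /* errno describes failure */
    /* args.num_devices, args.num_bos and args.priv_data_size now size the
     * buffers for the following KFD_CRIU_OP_CHECKPOINT call; queues stay
     * evicted until KFD_CRIU_OP_UNPAUSE */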
(API redesigned by David Yat Sin) Suggested-by: Felix Kuehling Reviewed-by: Felix Kuehling Signed-off-by: David Yat Sin Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 98 +++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 65 ++++++++++++++++++++- include/uapi/linux/kfd_ioctl.h | 81 +++++++++++++++++++++++++- 3 files changed, 241 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 214a2c67fba4..90e6d9e335a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include "kfd_priv.h" @@ -1859,6 +1860,75 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) } #endif +static int criu_checkpoint(struct file *filep, + struct kfd_process *p, + struct kfd_ioctl_criu_args *args) +{ + return 0; +} + +static int criu_restore(struct file *filep, + struct kfd_process *p, + struct kfd_ioctl_criu_args *args) +{ + return 0; +} + +static int criu_unpause(struct file *filep, + struct kfd_process *p, + struct kfd_ioctl_criu_args *args) +{ + return 0; +} + +static int criu_resume(struct file *filep, + struct kfd_process *p, + struct kfd_ioctl_criu_args *args) +{ + return 0; +} + +static int criu_process_info(struct file *filep, + struct kfd_process *p, + struct kfd_ioctl_criu_args *args) +{ + return 0; +} + +static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data) +{ + struct kfd_ioctl_criu_args *args = data; + int ret; + + dev_dbg(kfd_device, "CRIU operation: %d\n", args->op); + switch (args->op) { + case KFD_CRIU_OP_PROCESS_INFO: + ret = criu_process_info(filep, p, args); + break; + case KFD_CRIU_OP_CHECKPOINT: + ret = criu_checkpoint(filep, p, args); + break; + case KFD_CRIU_OP_UNPAUSE: + ret = criu_unpause(filep, p, args); + break; + case KFD_CRIU_OP_RESTORE: + ret = criu_restore(filep, p, args); + break; + case KFD_CRIU_OP_RESUME: + ret = criu_resume(filep, p, args); + break; + default: + dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op); + ret = -EINVAL; + break; + } + + if (ret) + dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret); + + return ret; +} + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ .cmd_drv = 0, .name = #ioctl} @@ -1962,6 +2032,10 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE, kfd_ioctl_set_xnack_mode, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP, + kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE), + }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) @@ -1976,6 +2050,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) char *kdata = NULL; unsigned int usize, asize; int retcode = -EINVAL; + bool ptrace_attached = false; if (nr >= AMDKFD_CORE_IOCTL_COUNT) goto err_i1; @@ -2001,7 +2076,15 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) * processes need to create their own KFD device context. 
*/ process = filep->private_data; - if (process->lead_thread != current->group_leader) { + + rcu_read_lock(); + if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) && + ptrace_parent(process->lead_thread) == current) + ptrace_attached = true; + rcu_read_unlock(); + + if (process->lead_thread != current->group_leader + && !ptrace_attached) { dev_dbg(kfd_device, "Using KFD FD in wrong process\n"); retcode = -EBADF; goto err_i1; @@ -2016,6 +2099,19 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) goto err_i1; } + /* + * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support + * CAP_CHECKPOINT_RESTORE, so we also allow access if CAP_SYS_ADMIN as CAP_SYS_ADMIN is a + * more priviledged access. + */ + if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) { + if (!capable(CAP_CHECKPOINT_RESTORE) && + !capable(CAP_SYS_ADMIN)) { + retcode = -EACCES; + goto err_i1; + } + } + if (cmd & (IOC_IN | IOC_OUT)) { if (asize <= sizeof(stack_kdata)) { kdata = stack_kdata; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ea68f3b3a4e9..f928878196ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -121,7 +121,26 @@ */ #define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512 - +/** + * enum kfd_ioctl_flags - KFD ioctl flags + * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how + * userspace can use a given ioctl. + */ +enum kfd_ioctl_flags { + /* + * @KFD_IOC_FLAG_CHECKPOINT_RESTORE: + * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially + * perform privileged operations and load arbitrary data into MQDs and + * eventually HQD registers when the queue is mapped by HWS. In order to + * prevent this we should perform additional security checks. + * + * This is equivalent to callers with the CHECKPOINT_RESTORE capability. + * + * Note: Since earlier versions of docker do not support CHECKPOINT_RESTORE, + * we also allow ioctls with SYS_ADMIN capability. 
+ */ + KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0), +}; /* * Kernel module parameter to specify maximum number of supported queues per * device @@ -1006,6 +1025,50 @@ void kfd_process_set_trap_handler(struct qcm_process_device *qpd, uint64_t tba_addr, uint64_t tma_addr); +/* CRIU */ +/* + * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of the CRIU private + * structures: + * kfd_criu_process_priv_data + * kfd_criu_device_priv_data + * kfd_criu_bo_priv_data + * kfd_criu_queue_priv_data + * kfd_criu_event_priv_data + * kfd_criu_svm_range_priv_data + */ + +#define KFD_CRIU_PRIV_VERSION 1 + +struct kfd_criu_process_priv_data { + uint32_t version; +}; + +struct kfd_criu_device_priv_data { + /* For future use */ + uint64_t reserved; +}; + +struct kfd_criu_bo_priv_data { + uint64_t reserved; +}; + +struct kfd_criu_svm_range_priv_data { + uint32_t object_type; + uint32_t reserved; +}; + +struct kfd_criu_queue_priv_data { + uint32_t object_type; + uint32_t reserved; +}; + +struct kfd_criu_event_priv_data { + uint32_t object_type; + uint32_t reserved; +}; + +/* CRIU - End */ + /* Queue Context Management */ int init_queue(struct queue **q, const struct queue_properties *properties); void uninit_queue(struct queue *q); diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index af96af174dc4..49429a6c42fc 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -468,6 +468,82 @@ struct kfd_ioctl_smi_events_args { __u32 anon_fd; /* from KFD */ }; +/************************************************************************************************** + * CRIU IOCTLs (Checkpoint Restore In Userspace) + * + * When checkpointing a process, the userspace application will perform: + * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts + * all the queues. + * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges) + * 3. UNPAUSE op to un-evict all the queues + * + * When restoring a process, the CRIU userspace application will perform: + * + * 1. RESTORE op to restore process contents + * 2. RESUME op to start the process + * + * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User + * application needs to perform an UNPAUSE operation after calling PROCESS_INFO. + */ + +enum kfd_criu_op { + KFD_CRIU_OP_PROCESS_INFO, + KFD_CRIU_OP_CHECKPOINT, + KFD_CRIU_OP_UNPAUSE, + KFD_CRIU_OP_RESTORE, + KFD_CRIU_OP_RESUME, +}; + +/** + * kfd_ioctl_criu_args - Arguments perform CRIU operation + * @devices: [in/out] User pointer to memory location for devices information. + * This is an array of type kfd_criu_device_bucket. + * @bos: [in/out] User pointer to memory location for BOs information + * This is an array of type kfd_criu_bo_bucket. + * @priv_data: [in/out] User pointer to memory location for private data + * @priv_data_size: [in/out] Size of priv_data in bytes + * @num_devices: [in/out] Number of GPUs used by process. Size of @devices array. + * @num_bos [in/out] Number of BOs used by process. Size of @bos array. + * @num_objects: [in/out] Number of objects used by process. Objects are opaque to + * user application. 
+ * @pid: [in/out] PID of the process being checkpointed + * @op [in] Type of operation (kfd_criu_op) + * + * Return: 0 on success, -errno on failure + */ +struct kfd_ioctl_criu_args { + __u64 devices; /* Used during ops: CHECKPOINT, RESTORE */ + __u64 bos; /* Used during ops: CHECKPOINT, RESTORE */ + __u64 priv_data; /* Used during ops: CHECKPOINT, RESTORE */ + __u64 priv_data_size; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 num_devices; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 num_bos; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 num_objects; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 pid; /* Used during ops: PROCESS_INFO, RESUME */ + __u32 op; +}; + +struct kfd_criu_device_bucket { + __u32 user_gpu_id; + __u32 actual_gpu_id; + __u32 drm_fd; + __u32 pad; +}; + +struct kfd_criu_bo_bucket { + __u64 addr; + __u64 size; + __u64 offset; + __u64 restored_offset; /* During restore, updated offset for BO */ + __u32 gpu_id; /* This is the user_gpu_id */ + __u32 alloc_flags; + __u32 dmabuf_fd; + __u32 pad; +}; + +/* CRIU IOCTLs - END */ +/**************************************************************************************************/ + /* Register offset inside the remapped mmio page */ enum kfd_mmio_remap { @@ -742,7 +818,10 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_SET_XNACK_MODE \ AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args) +#define AMDKFD_IOC_CRIU_OP \ + AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x22 +#define AMDKFD_COMMAND_END 0x23 #endif -- cgit v1.2.3-71-gd317 From 692996f2bef7aa1737e07554255ba0d9a73fb750 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Tue, 18 Jan 2022 01:47:58 -0500 Subject: drm/amdkfd: Bump up KFD API version for CRIU - Change KFD minor version to 7 for CRIU Proposed userspace changes: https://github.com/RadeonOpenCompute/criu Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- include/uapi/linux/kfd_ioctl.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 49429a6c42fc..e6a56c146920 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -32,9 +32,10 @@ * - 1.4 - Indicate new SRAM EDC bit in device properties * - 1.5 - Add SVM API * - 1.6 - Query clear flags in SVM get_attr API + * - 1.7 - Checkpoint Restore (CRIU) API */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 6 +#define KFD_IOCTL_MINOR_VERSION 7 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ -- cgit v1.2.3-71-gd317 From 7c76ecd9c99b6e9a771d813ab1aa7fa428b3ade1 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 8 Feb 2022 16:14:32 +0200 Subject: xfrm: enforce validity of offload input flags struct xfrm_user_offload has a flags variable that receives user input, but the kernel didn't check that only valid bits were provided, so unsanitized input was forwarded directly to the drivers. For example, the exposed XFRM_OFFLOAD_IPV6 define was used by strongswan, but not implemented in the kernel at all. As a solution, check and sanitize the input flags, forwarding only XFRM_OFFLOAD_INBOUND to the drivers.
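Illustratively (a hypothetical user-space snippet, not from the patch), a state configured over netlink now fails early instead of leaking unknown bits to the driver:

    struct xfrm_user_offload xuo = {
        .ifindex = ifindex,                 /* illustrative */
        .flags   = XFRM_OFFLOAD_INBOUND,    /* accepted */
    };
    /* any bit outside XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND now makes
     * xfrm_dev_state_add() return -EINVAL before reaching the driver */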
Fixes: d77e38e612a0 ("xfrm: Add an IPsec hardware offloading API") Signed-off-by: Leon Romanovsky Signed-off-by: Steffen Klassert --- include/uapi/linux/xfrm.h | 6 ++++++ net/xfrm/xfrm_device.c | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 4e29d7851890..65e13a099b1a 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -511,6 +511,12 @@ struct xfrm_user_offload { int ifindex; __u8 flags; }; +/* This flag was exposed without any kernel code that supports it. + * Unfortunately, strongswan has the code that sets this flag, + * which makes it impossible to reuse this bit. + * + * So leave it here to make sure that it won't be reused by mistake. + */ #define XFRM_OFFLOAD_IPV6 1 #define XFRM_OFFLOAD_INBOUND 2 diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 3fa066419d37..39bce5d764de 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -223,6 +223,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, if (x->encap || x->tfcpad) return -EINVAL; + if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) + return -EINVAL; + dev = dev_get_by_index(net, xuo->ifindex); if (!dev) { if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) { @@ -262,7 +265,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); xso->real_dev = dev; xso->num_exthdrs = 1; - xso->flags = xuo->flags; + /* Don't forward bit that is not implemented */ + xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6; err = dev->xfrmdev_ops->xdo_dev_state_add(x); if (err) { -- cgit v1.2.3-71-gd317 From 63ed1aab3d40aa61aaa66819bdce9377ac7f40fa Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 9 Feb 2022 12:05:57 +0800 Subject: mctp: Add SIOCMCTP{ALLOC,DROP}TAG ioctls for tag control This change adds a couple of new ioctls for mctp sockets: SIOCMCTPALLOCTAG and SIOCMCTPDROPTAG. These ioctls provide facilities for explicit allocation / release of tags, overriding the automatic allocate-on-send/release-on-reply and timeout behaviours. This allows userspace more control over messages that may not fit a simple request/response model. In order to indicate a pre-allocated tag to the sendmsg() syscall, we introduce a new flag to the struct sockaddr_mctp.smctp_tag value: MCTP_TAG_PREALLOC. Additional changes from Jeremy Kerr . Contains a fix that was: Reported-by: kernel test robot Signed-off-by: Matt Johnston Signed-off-by: Jeremy Kerr Signed-off-by: David S. Miller --- Documentation/networking/mctp.rst | 48 ++++++++++ include/net/mctp.h | 11 ++- include/trace/events/mctp.h | 5 +- include/uapi/linux/mctp.h | 18 ++++ net/mctp/af_mctp.c | 189 ++++++++++++++++++++++++++++++++------ net/mctp/route.c | 114 +++++++++++++++++------ 6 files changed, 329 insertions(+), 56 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/mctp.rst b/Documentation/networking/mctp.rst index 46f74bffce0f..c628cb5406d2 100644 --- a/Documentation/networking/mctp.rst +++ b/Documentation/networking/mctp.rst @@ -212,6 +212,54 @@ remote address is already known, or the message does not require a reply. Like the send calls, sockets will only receive responses to requests they have sent (TO=1) and may only respond (TO=0) to requests they have received.
+``ioctl(SIOCMCTPALLOCTAG)`` and ``ioctl(SIOCMCTPDROPTAG)`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These tags give applications more control over MCTP message tags, by allocating +(and dropping) tag values explicitly, rather than the kernel automatically +allocating a per-message tag at ``sendmsg()`` time. + +In general, you will only need to use these ioctls if your MCTP protocol does +not fit the usual request/response model. For example, if you need to persist +tags across multiple requests, or a request may generate more than one response. +In these cases, the ioctls allow you to decouple the tag allocation (and +release) from individual message send and receive operations. + +Both ioctls are passed a pointer to a ``struct mctp_ioc_tag_ctl``: + +.. code-block:: C + + struct mctp_ioc_tag_ctl { + mctp_eid_t peer_addr; + __u8 tag; + __u16 flags; + }; + +``SIOCMCTPALLOCTAG`` allocates a tag for a specific peer, which an application +can use in future ``sendmsg()`` calls. The application populates the +``peer_addr`` member with the remote EID. Other fields must be zero. + +On return, the ``tag`` member will be populated with the allocated tag value. +The allocated tag will have the following tag bits set: + + - ``MCTP_TAG_OWNER``: it only makes sense to allocate tags if you're the tag + owner + + - ``MCTP_TAG_PREALLOC``: to indicate to ``sendmsg()`` that this is a + preallocated tag. + + - ... and the actual tag value, within the least-significant three bits + (``MCTP_TAG_MASK``). Note that zero is a valid tag value. + +The tag value should be used as-is for the ``smctp_tag`` member of ``struct +sockaddr_mctp``. + +``SIOCMCTPDROPTAG`` releases a tag that has been previously allocated by a +``SIOCMCTPALLOCTAG`` ioctl. The ``peer_addr`` must be the same as used for the +allocation, and the ``tag`` value must match exactly the tag returned from the +allocation (including the ``MCTP_TAG_OWNER`` and ``MCTP_TAG_PREALLOC`` bits). +The ``flags`` field must be zero. + Kernel internals ================ diff --git a/include/net/mctp.h b/include/net/mctp.h index 706d329dd8e8..e80a4baf8379 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -126,7 +126,7 @@ struct mctp_sock { */ struct mctp_sk_key { mctp_eid_t peer_addr; - mctp_eid_t local_addr; + mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */ __u8 tag; /* incoming tag match; invert TO for local */ /* we hold a ref to sk when set */ @@ -163,6 +163,12 @@ struct mctp_sk_key { */ unsigned long dev_flow_state; struct mctp_dev *dev; + + /* a tag allocated with SIOCMCTPALLOCTAG ioctl will not expire + * automatically on timeout or response, instead SIOCMCTPDROPTAG + * is used. 
+ */ + bool manual_alloc; }; struct mctp_skb_cb { @@ -239,6 +245,9 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag); void mctp_key_unref(struct mctp_sk_key *key); +struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, + mctp_eid_t daddr, mctp_eid_t saddr, + bool manual, u8 *tagp); /* routing <--> device interface */ unsigned int mctp_default_net(struct net *net); diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h index 175b057c507f..165cf25f77a7 100644 --- a/include/trace/events/mctp.h +++ b/include/trace/events/mctp.h @@ -15,6 +15,7 @@ enum { MCTP_TRACE_KEY_REPLIED, MCTP_TRACE_KEY_INVALIDATED, MCTP_TRACE_KEY_CLOSED, + MCTP_TRACE_KEY_DROPPED, }; #endif /* __TRACE_MCTP_ENUMS */ @@ -22,6 +23,7 @@ TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT); TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED); TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED); TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED); +TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_DROPPED); TRACE_EVENT(mctp_key_acquire, TP_PROTO(const struct mctp_sk_key *key), @@ -66,7 +68,8 @@ TRACE_EVENT(mctp_key_release, { MCTP_TRACE_KEY_TIMEOUT, "timeout" }, { MCTP_TRACE_KEY_REPLIED, "replied" }, { MCTP_TRACE_KEY_INVALIDATED, "invalidated" }, - { MCTP_TRACE_KEY_CLOSED, "closed" }) + { MCTP_TRACE_KEY_CLOSED, "closed" }, + { MCTP_TRACE_KEY_DROPPED, "dropped" }) ) ); diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h index 07b0318716fc..154ab56651f1 100644 --- a/include/uapi/linux/mctp.h +++ b/include/uapi/linux/mctp.h @@ -44,7 +44,25 @@ struct sockaddr_mctp_ext { #define MCTP_TAG_MASK 0x07 #define MCTP_TAG_OWNER 0x08 +#define MCTP_TAG_PREALLOC 0x10 #define MCTP_OPT_ADDR_EXT 1 +#define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0) +#define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1) + +struct mctp_ioc_tag_ctl { + mctp_eid_t peer_addr; + + /* For SIOCMCTPALLOCTAG: must be passed as zero, kernel will + * populate with the allocated tag value. Returned tag value will + * always have TO and PREALLOC set. + * + * For SIOCMCTPDROPTAG: userspace provides tag value to drop, from + * a prior SIOCMCTPALLOCTAG call (and so must have TO and PREALLOC set). + */ + __u8 tag; + __u16 flags; +}; + #endif /* __UAPI_MCTP_H */ diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index c921de63b494..f0702d920d8d 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -6,6 +6,7 @@ * Copyright (c) 2021 Google */ +#include #include #include #include @@ -21,6 +22,8 @@ /* socket implementation */ +static void mctp_sk_expire_keys(struct timer_list *timer); + static int mctp_release(struct socket *sock) { struct sock *sk = sock->sk; @@ -99,13 +102,20 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct sk_buff *skb; if (addr) { + const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER | + MCTP_TAG_PREALLOC; + if (addrlen < sizeof(struct sockaddr_mctp)) return -EINVAL; if (addr->smctp_family != AF_MCTP) return -EINVAL; if (!mctp_sockaddr_is_ok(addr)) return -EINVAL; - if (addr->smctp_tag & ~(MCTP_TAG_MASK | MCTP_TAG_OWNER)) + if (addr->smctp_tag & ~tagbits) + return -EINVAL; + /* can't preallocate a non-owned tag */ + if (addr->smctp_tag & MCTP_TAG_PREALLOC && + !(addr->smctp_tag & MCTP_TAG_OWNER)) return -EINVAL; } else { @@ -248,6 +258,32 @@ out_free: return rc; } +/* We're done with the key; invalidate, stop reassembly, and remove from lists. 
+ */ +static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net, + unsigned long flags, unsigned long reason) +__releases(&key->lock) +__must_hold(&net->mctp.keys_lock) +{ + struct sk_buff *skb; + + trace_mctp_key_release(key, reason); + skb = key->reasm_head; + key->reasm_head = NULL; + key->reasm_dead = true; + key->valid = false; + mctp_dev_release_key(key->dev, key); + spin_unlock_irqrestore(&key->lock, flags); + + hlist_del(&key->hlist); + hlist_del(&key->sklist); + + /* unref for the lists */ + mctp_key_unref(key); + + kfree_skb(skb); +} + static int mctp_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -293,6 +329,115 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname, return -EINVAL; } +static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg) +{ + struct net *net = sock_net(&msk->sk); + struct mctp_sk_key *key = NULL; + struct mctp_ioc_tag_ctl ctl; + unsigned long flags; + u8 tag; + + if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl))) + return -EFAULT; + + if (ctl.tag) + return -EINVAL; + + if (ctl.flags) + return -EINVAL; + + key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY, + true, &tag); + if (IS_ERR(key)) + return PTR_ERR(key); + + ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC; + if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) { + spin_lock_irqsave(&key->lock, flags); + __mctp_key_remove(key, net, flags, MCTP_TRACE_KEY_DROPPED); + mctp_key_unref(key); + return -EFAULT; + } + + mctp_key_unref(key); + return 0; +} + +static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg) +{ + struct net *net = sock_net(&msk->sk); + struct mctp_ioc_tag_ctl ctl; + unsigned long flags, fl2; + struct mctp_sk_key *key; + struct hlist_node *tmp; + int rc; + u8 tag; + + if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl))) + return -EFAULT; + + if (ctl.flags) + return -EINVAL; + + /* Must be a local tag, TO set, preallocated */ + if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC)) + return -EINVAL; + + tag = ctl.tag & MCTP_TAG_MASK; + rc = -EINVAL; + + spin_lock_irqsave(&net->mctp.keys_lock, flags); + hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { + /* we do an irqsave here, even though we know the irq state, + * so we have the flags to pass to __mctp_key_remove + */ + spin_lock_irqsave(&key->lock, fl2); + if (key->manual_alloc && + ctl.peer_addr == key->peer_addr && + tag == key->tag) { + __mctp_key_remove(key, net, fl2, + MCTP_TRACE_KEY_DROPPED); + rc = 0; + } else { + spin_unlock_irqrestore(&key->lock, fl2); + } + } + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); + + return rc; +} + +static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk); + + switch (cmd) { + case SIOCMCTPALLOCTAG: + return mctp_ioctl_alloctag(msk, arg); + case SIOCMCTPDROPTAG: + return mctp_ioctl_droptag(msk, arg); + } + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = compat_ptr(arg); + + switch (cmd) { + /* These have compatible ptr layouts */ + case SIOCMCTPALLOCTAG: + case SIOCMCTPDROPTAG: + return mctp_ioctl(sock, cmd, (unsigned long)argp); + } + + return -ENOIOCTLCMD; +} +#endif + static const struct proto_ops mctp_dgram_ops = { .family = PF_MCTP, .release = mctp_release, @@ -302,7 +447,7 @@ static const struct proto_ops 
mctp_dgram_ops = { .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, - .ioctl = sock_no_ioctl, + .ioctl = mctp_ioctl, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, @@ -312,6 +457,9 @@ static const struct proto_ops mctp_dgram_ops = { .recvmsg = mctp_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, +#ifdef CONFIG_COMPAT + .compat_ioctl = mctp_compat_ioctl, +#endif }; static void mctp_sk_expire_keys(struct timer_list *timer) @@ -319,7 +467,7 @@ static void mctp_sk_expire_keys(struct timer_list *timer) struct mctp_sock *msk = container_of(timer, struct mctp_sock, key_expiry); struct net *net = sock_net(&msk->sk); - unsigned long next_expiry, flags; + unsigned long next_expiry, flags, fl2; struct mctp_sk_key *key; struct hlist_node *tmp; bool next_expiry_valid = false; @@ -327,15 +475,16 @@ static void mctp_sk_expire_keys(struct timer_list *timer) spin_lock_irqsave(&net->mctp.keys_lock, flags); hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { - spin_lock(&key->lock); + /* don't expire. manual_alloc is immutable, no locking + * required. + */ + if (key->manual_alloc) + continue; + spin_lock_irqsave(&key->lock, fl2); if (!time_after_eq(key->expiry, jiffies)) { - trace_mctp_key_release(key, MCTP_TRACE_KEY_TIMEOUT); - key->valid = false; - hlist_del_rcu(&key->hlist); - hlist_del_rcu(&key->sklist); - spin_unlock(&key->lock); - mctp_key_unref(key); + __mctp_key_remove(key, net, fl2, + MCTP_TRACE_KEY_TIMEOUT); continue; } @@ -346,7 +495,7 @@ static void mctp_sk_expire_keys(struct timer_list *timer) next_expiry = key->expiry; next_expiry_valid = true; } - spin_unlock(&key->lock); + spin_unlock_irqrestore(&key->lock, fl2); } spin_unlock_irqrestore(&net->mctp.keys_lock, flags); @@ -387,9 +536,9 @@ static void mctp_sk_unhash(struct sock *sk) { struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); struct net *net = sock_net(sk); + unsigned long flags, fl2; struct mctp_sk_key *key; struct hlist_node *tmp; - unsigned long flags; /* remove from any type-based binds */ mutex_lock(&net->mctp.bind_lock); @@ -399,20 +548,8 @@ static void mctp_sk_unhash(struct sock *sk) /* remove tag allocations */ spin_lock_irqsave(&net->mctp.keys_lock, flags); hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) { - hlist_del(&key->sklist); - hlist_del(&key->hlist); - - trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED); - - spin_lock(&key->lock); - kfree_skb(key->reasm_head); - key->reasm_head = NULL; - key->reasm_dead = true; - key->valid = false; - spin_unlock(&key->lock); - - /* key is no longer on the lookup lists, unref */ - mctp_key_unref(key); + spin_lock_irqsave(&key->lock, fl2); + __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED); } spin_unlock_irqrestore(&net->mctp.keys_lock, flags); } diff --git a/net/mctp/route.c b/net/mctp/route.c index 35f72e99e188..17e3482aa770 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -203,29 +203,38 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk) return rc; } -/* We're done with the key; unset valid and remove from lists. There may still - * be outstanding refs on the key though... +/* Helper for mctp_route_input(). + * We're done with the key; unlock and unref the key. + * For the usual case of automatic expiry we remove the key from lists. + * In the case that manual allocation is set on a key we release the lock + * and local ref, reset reassembly, but don't remove from lists. 
*/ -static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net, - unsigned long flags) - __releases(&key->lock) +static void __mctp_key_done_in(struct mctp_sk_key *key, struct net *net, + unsigned long flags, unsigned long reason) +__releases(&key->lock) { struct sk_buff *skb; + trace_mctp_key_release(key, reason); skb = key->reasm_head; key->reasm_head = NULL; - key->reasm_dead = true; - key->valid = false; - mctp_dev_release_key(key->dev, key); + + if (!key->manual_alloc) { + key->reasm_dead = true; + key->valid = false; + mctp_dev_release_key(key->dev, key); + } spin_unlock_irqrestore(&key->lock, flags); - spin_lock_irqsave(&net->mctp.keys_lock, flags); - hlist_del(&key->hlist); - hlist_del(&key->sklist); - spin_unlock_irqrestore(&net->mctp.keys_lock, flags); + if (!key->manual_alloc) { + spin_lock_irqsave(&net->mctp.keys_lock, flags); + hlist_del(&key->hlist); + hlist_del(&key->sklist); + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); - /* one unref for the lists */ - mctp_key_unref(key); + /* unref for the lists */ + mctp_key_unref(key); + } /* and one for the local reference */ mctp_key_unref(key); @@ -379,9 +388,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) /* we've hit a pending reassembly; not much we * can do but drop it */ - trace_mctp_key_release(key, - MCTP_TRACE_KEY_REPLIED); - __mctp_key_unlock_drop(key, net, f); + __mctp_key_done_in(key, net, f, + MCTP_TRACE_KEY_REPLIED); key = NULL; } rc = 0; @@ -423,9 +431,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) } else { if (key->reasm_head || key->reasm_dead) { /* duplicate start? drop everything */ - trace_mctp_key_release(key, - MCTP_TRACE_KEY_INVALIDATED); - __mctp_key_unlock_drop(key, net, f); + __mctp_key_done_in(key, net, f, + MCTP_TRACE_KEY_INVALIDATED); rc = -EEXIST; key = NULL; } else { @@ -448,10 +455,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * the reassembly/response key */ if (!rc && flags & MCTP_HDR_FLAG_EOM) { + msk = container_of(key->sk, struct mctp_sock, sk); sock_queue_rcv_skb(key->sk, key->reasm_head); key->reasm_head = NULL; - trace_mctp_key_release(key, MCTP_TRACE_KEY_REPLIED); - __mctp_key_unlock_drop(key, net, f); + __mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED); key = NULL; } @@ -579,9 +586,9 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key, /* Allocate a locally-owned tag value for (saddr, daddr), and reserve * it for the socket msk */ -static struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, - mctp_eid_t saddr, - mctp_eid_t daddr, u8 *tagp) +struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, + mctp_eid_t daddr, mctp_eid_t saddr, + bool manual, u8 *tagp) { struct net *net = sock_net(&msk->sk); struct netns_mctp *mns = &net->mctp; @@ -636,6 +643,7 @@ static struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, mctp_reserve_tag(net, key, msk); trace_mctp_key_acquire(key); + key->manual_alloc = manual; *tagp = key->tag; } @@ -649,6 +657,50 @@ static struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, return key; } +static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk, + mctp_eid_t daddr, + u8 req_tag, u8 *tagp) +{ + struct net *net = sock_net(&msk->sk); + struct netns_mctp *mns = &net->mctp; + struct mctp_sk_key *key, *tmp; + unsigned long flags; + + req_tag &= ~(MCTP_TAG_PREALLOC | MCTP_TAG_OWNER); + key = NULL; + + spin_lock_irqsave(&mns->keys_lock, flags); + + 
hlist_for_each_entry(tmp, &mns->keys, hlist) { + if (tmp->tag != req_tag) + continue; + + if (!mctp_address_matches(tmp->peer_addr, daddr)) + continue; + + if (!tmp->manual_alloc) + continue; + + spin_lock(&tmp->lock); + if (tmp->valid) { + key = tmp; + refcount_inc(&key->refs); + spin_unlock(&tmp->lock); + break; + } + spin_unlock(&tmp->lock); + } + spin_unlock_irqrestore(&mns->keys_lock, flags); + + if (!key) + return ERR_PTR(-ENOENT); + + if (tagp) + *tagp = key->tag; + + return key; +} + /* routing lookups */ static bool mctp_rt_match_eid(struct mctp_route *rt, unsigned int net, mctp_eid_t eid) @@ -843,8 +895,14 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, if (rc) goto out_release; - if (req_tag & MCTP_HDR_FLAG_TO) { - key = mctp_alloc_local_tag(msk, saddr, daddr, &tag); + if (req_tag & MCTP_TAG_OWNER) { + if (req_tag & MCTP_TAG_PREALLOC) + key = mctp_lookup_prealloc_tag(msk, daddr, + req_tag, &tag); + else + key = mctp_alloc_local_tag(msk, daddr, saddr, + false, &tag); + if (IS_ERR(key)) { rc = PTR_ERR(key); goto out_release; @@ -855,7 +913,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, tag |= MCTP_HDR_FLAG_TO; } else { key = NULL; - tag = req_tag; + tag = req_tag & MCTP_TAG_MASK; } skb->protocol = htons(ETH_P_MCTP); -- cgit v1.2.3-71-gd317 From 9a69e2b385f443f244a7e8b8bcafe5ccfb0866b4 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Wed, 9 Feb 2022 19:43:32 +0100 Subject: bpf: Make remote_port field in struct bpf_sk_lookup 16-bit wide remote_port is another case of a BPF context field documented as a 32-bit value in network byte order for which the BPF context access converter generates a load of a zero-padded 16-bit integer in network byte order. The first such case was dst_port in bpf_sock, which was addressed in commit 4421a582718a ("bpf: Make dst_port field in struct bpf_sock 16-bit wide"). Loading 4 bytes from the remote_port offset and converting the value with bpf_ntohl() leads to surprising results, as the expected value is shifted by 16 bits. Reduce the confusion by splitting the field in two: a 16-bit field holding a big-endian integer, followed by a 16-bit anonymous zero-padding field.
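For illustration only, not part of this patch: a minimal sk_lookup program sketch showing how the narrowed field is read after this change. The program name, port value, and printk are hypothetical; struct bpf_sk_lookup and SK_PASS come from the uapi header, SEC(), bpf_ntohs() and bpf_printk() from the libbpf helper headers.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("sk_lookup")
    int check_remote_port(struct bpf_sk_lookup *ctx)
    {
    	/* remote_port is now a 16-bit big-endian field; a 2-byte
    	 * load plus bpf_ntohs() yields the host-order port, with
    	 * no 16-bit shift to compensate for.
    	 */
    	__u16 rport = bpf_ntohs(ctx->remote_port);

    	if (rport == 7)	/* hypothetical: observe echo-port traffic */
    		bpf_printk("sk_lookup: remote port %u", rport);

    	return SK_PASS;	/* continue with regular socket lookup */
    }

    char _license[] SEC("license") = "GPL";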
Suggested-by: Alexei Starovoitov Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220209184333.654927-2-jakub@cloudflare.com --- include/uapi/linux/bpf.h | 3 ++- net/bpf/test_run.c | 4 ++-- net/core/filter.c | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a7f0ddedac1f..afe3d0d7f5f2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6453,7 +6453,8 @@ struct bpf_sk_lookup { __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ __u32 remote_ip4; /* Network byte order */ __u32 remote_ip6[4]; /* Network byte order */ - __u32 remote_port; /* Network byte order */ + __be16 remote_port; /* Network byte order */ + __u16 :16; /* Zero padding */ __u32 local_ip4; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */ __u32 local_port; /* Host byte order */ diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index cb150f756f3d..f08034500813 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1147,7 +1147,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx))) goto out; - if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) { + if (user_ctx->local_port > U16_MAX) { ret = -ERANGE; goto out; } @@ -1155,7 +1155,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat ctx.family = (u16)user_ctx->family; ctx.protocol = (u16)user_ctx->protocol; ctx.dport = (u16)user_ctx->local_port; - ctx.sport = (__force __be16)user_ctx->remote_port; + ctx.sport = user_ctx->remote_port; switch (ctx.family) { case AF_INET: diff --git a/net/core/filter.c b/net/core/filter.c index 99a05199a806..83f06d3b2c52 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -10843,7 +10843,8 @@ static bool sk_lookup_is_valid_access(int off, int size, case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): - case bpf_ctx_range(struct bpf_sk_lookup, remote_port): + case offsetof(struct bpf_sk_lookup, remote_port) ... + offsetof(struct bpf_sk_lookup, local_ip4) - 1: case bpf_ctx_range(struct bpf_sk_lookup, local_port): case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex): bpf_ctx_record_field_size(info, sizeof(__u32)); -- cgit v1.2.3-71-gd317 From 5bdd3eb253544b1e80f904e1205699d0a126d2d6 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 4 Feb 2022 11:58:32 -0500 Subject: drm/amdkfd: Remove unused old debugger implementation Clean up the kfd code by removing the unused old debugger implementation. The address watch was only ever implemented in the upstream driver for GFXv7 (Kaveri). The user mode tools runtime using this API was never open-sourced. Work on the old debugger prototype that used this API was discontinued years ago. Only a small piece for resetting wavefronts is kept and moved to kfd_device_queue_manager.c.
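As a hypothetical userspace sketch of the user-visible effect (assuming the args struct and the renamed ioctl define remain in the uapi header, which this patch only renames), a legacy debugger client now simply observes EPERM from the stubbed-out handlers:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    int main(void)
    {
    	struct kfd_ioctl_dbg_register_args args = { .gpu_id = 1 };	/* gpu_id is illustrative */
    	int fd = open("/dev/kfd", O_RDWR);

    	if (fd < 0)
    		return 1;

    	/* The handler is now a stub: it fails unconditionally with
    	 * -EPERM instead of registering a debugger.
    	 */
    	if (ioctl(fd, AMDKFD_IOC_DBG_REGISTER_DEPRECATED, &args) == -1 &&
    	    errno == EPERM)
    		fprintf(stderr, "old KFD debug API rejected as expected\n");

    	return 0;
    }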
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 3 - .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 3 - drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 24 - .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 25 - drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 96 --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 24 - drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 24 - drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 10 - drivers/gpu/drm/amd/amdkfd/Makefile | 2 - drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 290 +------ drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 845 --------------------- drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h | 230 ------ drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 158 ---- drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h | 293 ------- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 - .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 59 ++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 35 + drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 12 - drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 - drivers/gpu/drm/amd/amdkfd/kfd_process.c | 19 - drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 9 - include/uapi/linux/kfd_ioctl.h | 8 +- 22 files changed, 106 insertions(+), 2070 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c delete mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h (limited to 'include/uapi/linux') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index 46cd4ee6bafb..c8935d718207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -37,10 +37,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, - .address_watch_disable = kgd_gfx_v9_address_watch_disable, - .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, - .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index abe93b3ff765..4191af5a3f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -289,10 +289,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, - .address_watch_disable = kgd_gfx_v9_address_watch_disable, - .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, - .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 7b7f4b2764c1..9378fc79e9ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -671,20 +671,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct amdgpu_device *adev) -{ - return 0; -} - -static int kgd_address_watch_execute(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo) -{ - return 0; -} - static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) @@ -709,13 +695,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev, return 0; } -static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset) -{ - return 0; -} - static void set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { @@ -767,10 +746,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, .hqd_destroy = kgd_hqd_destroy, .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = set_vm_context_page_table_base, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 1f37d3574001..e9c80ce13f3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -582,21 +582,6 @@ static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, return 0; } - -static int address_watch_disable_v10_3(struct amdgpu_device *adev) -{ - return 0; -} - -static int address_watch_execute_v10_3(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo) -{ - return 0; -} - static int wave_control_execute_v10_3(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) @@ -621,13 +606,6 @@ static int wave_control_execute_v10_3(struct amdgpu_device *adev, return 0; } -static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset) -{ - return 0; -} - static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { @@ -809,10 +787,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3, .hqd_destroy = hqd_destroy_v10_3, .hqd_sdma_destroy = hqd_sdma_destroy_v10_3, - .address_watch_disable = address_watch_disable_v10_3, - .address_watch_execute = address_watch_execute_v10_3, .wave_control_execute = wave_control_execute_v10_3, - .address_watch_get_offset = address_watch_get_offset_v10_3, .get_atc_vmid_pasid_mapping_info = NULL, .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, .program_trap_handler_settings = program_trap_handler_settings_v10_3, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 36528dad7684..65552bb7d2f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -45,43 +45,6 @@ enum { MAX_WATCH_ADDRESSES = 4 }; -enum { - 
ADDRESS_WATCH_REG_ADDR_HI = 0, - ADDRESS_WATCH_REG_ADDR_LO, - ADDRESS_WATCH_REG_CNTL, - ADDRESS_WATCH_REG_MAX -}; - -/* not defined in the CI/KV reg file */ -enum { - ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL, - ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF, - ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000, - /* extend the mask to 26 bits to match the low address field */ - ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6, - ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF -}; - -static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = { - mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL, - mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL, - mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL, - mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL -}; - -union TCP_WATCH_CNTL_BITS { - struct { - uint32_t mask:24; - uint32_t vmid:4; - uint32_t atc:1; - uint32_t mode:2; - uint32_t valid:1; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { @@ -529,55 +492,6 @@ static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, return 0; } -static int kgd_address_watch_disable(struct amdgpu_device *adev) -{ - union TCP_WATCH_CNTL_BITS cntl; - unsigned int i; - - cntl.u32All = 0; - - cntl.bitfields.valid = 0; - cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK; - cntl.bitfields.atc = 1; - - /* Turning off this address until we set all the registers */ - for (i = 0; i < MAX_WATCH_ADDRESSES; i++) - WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX + - ADDRESS_WATCH_REG_CNTL], cntl.u32All); - - return 0; -} - -static int kgd_address_watch_execute(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo) -{ - union TCP_WATCH_CNTL_BITS cntl; - - cntl.u32All = cntl_val; - - /* Turning off this watch point until we set all the registers */ - cntl.bitfields.valid = 0; - WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + - ADDRESS_WATCH_REG_CNTL], cntl.u32All); - - WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + - ADDRESS_WATCH_REG_ADDR_HI], addr_hi); - - WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + - ADDRESS_WATCH_REG_ADDR_LO], addr_lo); - - /* Enable the watch point */ - cntl.bitfields.valid = 1; - - WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + - ADDRESS_WATCH_REG_CNTL], cntl.u32All); - - return 0; -} - static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) @@ -602,13 +516,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev, return 0; } -static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset) -{ - return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; -} - static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { @@ -665,10 +572,7 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = { .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, .hqd_destroy = kgd_hqd_destroy, .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, 
.set_scratch_backing_va = set_scratch_backing_va, .set_vm_context_page_table_base = set_vm_context_page_table_base, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 52832cd69a93..9dc5f2a0cc07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -538,20 +538,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct amdgpu_device *adev) -{ - return 0; -} - -static int kgd_address_watch_execute(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo) -{ - return 0; -} - static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) @@ -576,13 +562,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev, return 0; } -static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset) -{ - return 0; -} - static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { @@ -614,10 +593,7 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, .hqd_destroy = kgd_hqd_destroy, .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 1abf662a0e91..53895a41932e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -622,20 +622,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev) -{ - return 0; -} - -int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo) -{ - return 0; -} - int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) @@ -660,13 +646,6 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, return 0; } -uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset) -{ - return 0; -} - void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { @@ -888,10 +867,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_gfx_v9_address_watch_disable, - .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, - .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 24be49df26fd..c7ed3bc9053c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -46,19 +46,9 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev); -int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); -uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset); - bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index c4f3aff11072..19cfbf9577b4 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -51,8 +51,6 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_events.o \ $(AMDKFD_PATH)/cik_event_interrupt.o \ $(AMDKFD_PATH)/kfd_int_process_v9.o \ - $(AMDKFD_PATH)/kfd_dbgdev.o \ - $(AMDKFD_PATH)/kfd_dbgmgr.o \ $(AMDKFD_PATH)/kfd_smi_events.o \ $(AMDKFD_PATH)/kfd_crat.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 64e3b4e3a712..13e46631e289 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -39,7 +39,6 @@ #include #include "kfd_priv.h" #include "kfd_device_queue_manager.h" -#include "kfd_dbgmgr.h" #include "kfd_svm.h" #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" @@ -580,299 +579,26 @@ err_pdd: static int kfd_ioctl_dbg_register(struct file *filep, struct kfd_process *p, void *data) { - struct kfd_ioctl_dbg_register_args *args = data; - struct kfd_dev *dev; - struct kfd_dbgmgr *dbgmgr_ptr; - struct kfd_process_device *pdd; - bool create_ok; - long status = 0; - - mutex_lock(&p->mutex); - pdd = kfd_process_device_data_by_id(p, args->gpu_id); - if (!pdd) { - status = -EINVAL; - goto err_pdd; - } - dev = pdd->dev; - - if (dev->adev->asic_type == CHIP_CARRIZO) { - pr_debug("kfd_ioctl_dbg_register not supported on CZ\n"); - status = -EINVAL; - goto err_chip_unsupp; - } - - mutex_lock(kfd_get_dbgmgr_mutex()); - - /* - * make sure that we have pdd, if this the first queue created for - * this process - */ - pdd = kfd_bind_process_to_device(dev, p); - if (IS_ERR(pdd)) { - status = PTR_ERR(pdd); - goto out; - } - - if (!dev->dbgmgr) { - /* In case of a legal call, we have no dbgmgr yet */ - create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev); - if (create_ok) { - status = kfd_dbgmgr_register(dbgmgr_ptr, p); - if (status != 0) - kfd_dbgmgr_destroy(dbgmgr_ptr); - else - dev->dbgmgr = dbgmgr_ptr; - } - } else { - pr_debug("debugger already registered\n"); - status = -EINVAL; - } - -out: - mutex_unlock(kfd_get_dbgmgr_mutex()); -err_pdd: -err_chip_unsupp: - mutex_unlock(&p->mutex); - - return status; + return -EPERM; } static int kfd_ioctl_dbg_unregister(struct file *filep, struct kfd_process *p, void *data) { - struct kfd_ioctl_dbg_unregister_args *args = data; - struct kfd_process_device *pdd; - struct kfd_dev *dev; - long status; - - mutex_lock(&p->mutex); - pdd = 
kfd_process_device_data_by_id(p, args->gpu_id); - mutex_unlock(&p->mutex); - if (!pdd || !pdd->dev->dbgmgr) - return -EINVAL; - - dev = pdd->dev; - - if (dev->adev->asic_type == CHIP_CARRIZO) { - pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n"); - return -EINVAL; - } - - mutex_lock(kfd_get_dbgmgr_mutex()); - - status = kfd_dbgmgr_unregister(dev->dbgmgr, p); - if (!status) { - kfd_dbgmgr_destroy(dev->dbgmgr); - dev->dbgmgr = NULL; - } - - mutex_unlock(kfd_get_dbgmgr_mutex()); - - return status; + return -EPERM; } -/* - * Parse and generate variable size data structure for address watch. - * Total size of the buffer and # watch points is limited in order - * to prevent kernel abuse. (no bearing to the much smaller HW limitation - * which is enforced by dbgdev module) - * please also note that the watch address itself are not "copied from user", - * since it be set into the HW in user mode values. - * - */ static int kfd_ioctl_dbg_address_watch(struct file *filep, struct kfd_process *p, void *data) { - struct kfd_ioctl_dbg_address_watch_args *args = data; - struct kfd_dev *dev; - struct kfd_process_device *pdd; - struct dbg_address_watch_info aw_info; - unsigned char *args_buff; - long status; - void __user *cmd_from_user; - uint64_t watch_mask_value = 0; - unsigned int args_idx = 0; - - memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info)); - - mutex_lock(&p->mutex); - pdd = kfd_process_device_data_by_id(p, args->gpu_id); - mutex_unlock(&p->mutex); - if (!pdd) { - pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); - return -EINVAL; - } - dev = pdd->dev; - - if (dev->adev->asic_type == CHIP_CARRIZO) { - pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); - return -EINVAL; - } - cmd_from_user = (void __user *) args->content_ptr; - - /* Validate arguments */ - - if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) || - (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) || - (cmd_from_user == NULL)) - return -EINVAL; - - /* this is the actual buffer to work with */ - args_buff = memdup_user(cmd_from_user, - args->buf_size_in_bytes - sizeof(*args)); - if (IS_ERR(args_buff)) - return PTR_ERR(args_buff); - - aw_info.process = p; - - aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx])); - args_idx += sizeof(aw_info.num_watch_points); - - aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx]; - args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points; - - /* - * set watch address base pointer to point on the array base - * within args_buff - */ - aw_info.watch_address = (uint64_t *) &args_buff[args_idx]; - - /* skip over the addresses buffer */ - args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points; - - if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) { - status = -EINVAL; - goto out; - } - - watch_mask_value = (uint64_t) args_buff[args_idx]; - - if (watch_mask_value > 0) { - /* - * There is an array of masks. 
- * set watch mask base pointer to point on the array base - * within args_buff - */ - aw_info.watch_mask = (uint64_t *) &args_buff[args_idx]; - - /* skip over the masks buffer */ - args_idx += sizeof(aw_info.watch_mask) * - aw_info.num_watch_points; - } else { - /* just the NULL mask, set to NULL and skip over it */ - aw_info.watch_mask = NULL; - args_idx += sizeof(aw_info.watch_mask); - } - - if (args_idx >= args->buf_size_in_bytes - sizeof(args)) { - status = -EINVAL; - goto out; - } - - /* Currently HSA Event is not supported for DBG */ - aw_info.watch_event = NULL; - - mutex_lock(kfd_get_dbgmgr_mutex()); - - status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info); - - mutex_unlock(kfd_get_dbgmgr_mutex()); - -out: - kfree(args_buff); - - return status; + return -EPERM; } /* Parse and generate fixed size data structure for wave control */ static int kfd_ioctl_dbg_wave_control(struct file *filep, struct kfd_process *p, void *data) { - struct kfd_ioctl_dbg_wave_control_args *args = data; - struct kfd_dev *dev; - struct kfd_process_device *pdd; - struct dbg_wave_control_info wac_info; - unsigned char *args_buff; - uint32_t computed_buff_size; - long status; - void __user *cmd_from_user; - unsigned int args_idx = 0; - - memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info)); - - /* we use compact form, independent of the packing attribute value */ - computed_buff_size = sizeof(*args) + - sizeof(wac_info.mode) + - sizeof(wac_info.operand) + - sizeof(wac_info.dbgWave_msg.DbgWaveMsg) + - sizeof(wac_info.dbgWave_msg.MemoryVA) + - sizeof(wac_info.trapId); - - mutex_lock(&p->mutex); - pdd = kfd_process_device_data_by_id(p, args->gpu_id); - mutex_unlock(&p->mutex); - if (!pdd) { - pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); - return -EINVAL; - } - dev = pdd->dev; - - if (dev->adev->asic_type == CHIP_CARRIZO) { - pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); - return -EINVAL; - } - - /* input size must match the computed "compact" size */ - if (args->buf_size_in_bytes != computed_buff_size) { - pr_debug("size mismatch, computed : actual %u : %u\n", - args->buf_size_in_bytes, computed_buff_size); - return -EINVAL; - } - - cmd_from_user = (void __user *) args->content_ptr; - - if (cmd_from_user == NULL) - return -EINVAL; - - /* copy the entire buffer from user */ - - args_buff = memdup_user(cmd_from_user, - args->buf_size_in_bytes - sizeof(*args)); - if (IS_ERR(args_buff)) - return PTR_ERR(args_buff); - - /* move ptr to the start of the "pay-load" area */ - wac_info.process = p; - - wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx])); - args_idx += sizeof(wac_info.operand); - - wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx])); - args_idx += sizeof(wac_info.mode); - - wac_info.trapId = *((uint32_t *)(&args_buff[args_idx])); - args_idx += sizeof(wac_info.trapId); - - wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value = - *((uint32_t *)(&args_buff[args_idx])); - wac_info.dbgWave_msg.MemoryVA = NULL; - - mutex_lock(kfd_get_dbgmgr_mutex()); - - pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n", - wac_info.process, wac_info.operand, - wac_info.mode, wac_info.trapId, - wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value); - - status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info); - - pr_debug("Returned status of dbg manager is %ld\n", status); - - mutex_unlock(kfd_get_dbgmgr_mutex()); - - kfree(args_buff); - - return status; + return -EPERM; } static int 
kfd_ioctl_get_clock_counters(struct file *filep, @@ -2900,16 +2626,16 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS, kfd_ioctl_wait_events, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER, + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED, kfd_ioctl_dbg_register, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER, + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED, kfd_ioctl_dbg_unregister, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH, + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED, kfd_ioctl_dbg_address_watch, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL, + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED, kfd_ioctl_dbg_wave_control, 0), AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c deleted file mode 100644 index 8eca9ed3ab36..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ /dev/null @@ -1,845 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "kfd_pm4_headers.h" -#include "kfd_pm4_headers_diq.h" -#include "kfd_kernel_queue.h" -#include "kfd_priv.h" -#include "kfd_pm4_opcodes.h" -#include "cik_regs.h" -#include "kfd_dbgmgr.h" -#include "kfd_dbgdev.h" -#include "kfd_device_queue_manager.h" - -static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev) -{ - dev->kfd2kgd->address_watch_disable(dev->adev); -} - -static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, - u32 pasid, uint64_t vmid0_address, - uint32_t *packet_buff, size_t size_in_bytes) -{ - struct pm4__release_mem *rm_packet; - struct pm4__indirect_buffer_pasid *ib_packet; - struct kfd_mem_obj *mem_obj; - size_t pq_packets_size_in_bytes; - union ULARGE_INTEGER *largep; - union ULARGE_INTEGER addr; - struct kernel_queue *kq; - uint64_t *rm_state; - unsigned int *ib_packet_buff; - int status; - - if (WARN_ON(!size_in_bytes)) - return -EINVAL; - - kq = dbgdev->kq; - - pq_packets_size_in_bytes = sizeof(struct pm4__release_mem) + - sizeof(struct pm4__indirect_buffer_pasid); - - /* - * We acquire a buffer from DIQ - * The receive packet buff will be sitting on the Indirect Buffer - * and in the PQ we put the IB packet + sync packet(s). 
- */ - status = kq_acquire_packet_buffer(kq, - pq_packets_size_in_bytes / sizeof(uint32_t), - &ib_packet_buff); - if (status) { - pr_err("kq_acquire_packet_buffer failed\n"); - return status; - } - - memset(ib_packet_buff, 0, pq_packets_size_in_bytes); - - ib_packet = (struct pm4__indirect_buffer_pasid *) (ib_packet_buff); - - ib_packet->header.count = 3; - ib_packet->header.opcode = IT_INDIRECT_BUFFER_PASID; - ib_packet->header.type = PM4_TYPE_3; - - largep = (union ULARGE_INTEGER *) &vmid0_address; - - ib_packet->bitfields2.ib_base_lo = largep->u.low_part >> 2; - ib_packet->bitfields3.ib_base_hi = largep->u.high_part; - - ib_packet->control = (1 << 23) | (1 << 31) | - ((size_in_bytes / 4) & 0xfffff); - - ib_packet->bitfields5.pasid = pasid; - - /* - * for now we use release mem for GPU-CPU synchronization - * Consider WaitRegMem + WriteData as a better alternative - * we get a GART allocations ( gpu/cpu mapping), - * for the sync variable, and wait until: - * (a) Sync with HW - * (b) Sync var is written by CP to mem. - */ - rm_packet = (struct pm4__release_mem *) (ib_packet_buff + - (sizeof(struct pm4__indirect_buffer_pasid) / - sizeof(unsigned int))); - - status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t), - &mem_obj); - - if (status) { - pr_err("Failed to allocate GART memory\n"); - kq_rollback_packet(kq); - return status; - } - - rm_state = (uint64_t *) mem_obj->cpu_ptr; - - *rm_state = QUEUESTATE__ACTIVE_COMPLETION_PENDING; - - rm_packet->header.opcode = IT_RELEASE_MEM; - rm_packet->header.type = PM4_TYPE_3; - rm_packet->header.count = sizeof(struct pm4__release_mem) / 4 - 2; - - rm_packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - rm_packet->bitfields2.event_index = - event_index___release_mem__end_of_pipe; - - rm_packet->bitfields2.cache_policy = cache_policy___release_mem__lru; - rm_packet->bitfields2.atc = 0; - rm_packet->bitfields2.tc_wb_action_ena = 1; - - addr.quad_part = mem_obj->gpu_addr; - - rm_packet->bitfields4.address_lo_32b = addr.u.low_part >> 2; - rm_packet->address_hi = addr.u.high_part; - - rm_packet->bitfields3.data_sel = - data_sel___release_mem__send_64_bit_data; - - rm_packet->bitfields3.int_sel = - int_sel___release_mem__send_data_after_write_confirm; - - rm_packet->bitfields3.dst_sel = - dst_sel___release_mem__memory_controller; - - rm_packet->data_lo = QUEUESTATE__ACTIVE; - - kq_submit_packet(kq); - - /* Wait till CP writes sync code: */ - status = amdkfd_fence_wait_timeout( - rm_state, - QUEUESTATE__ACTIVE, 1500); - - kfd_gtt_sa_free(dbgdev->dev, mem_obj); - - return status; -} - -static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev) -{ - /* - * no action is needed in this case, - * just make sure diq will not be used - */ - - dbgdev->kq = NULL; - - return 0; -} - -static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev) -{ - struct queue_properties properties; - unsigned int qid; - struct kernel_queue *kq = NULL; - int status; - - properties.type = KFD_QUEUE_TYPE_DIQ; - - status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL, - &properties, &qid, NULL, NULL, NULL, NULL); - - if (status) { - pr_err("Failed to create DIQ\n"); - return status; - } - - pr_debug("DIQ Created with queue id: %d\n", qid); - - kq = pqm_get_kernel_queue(dbgdev->pqm, qid); - - if (!kq) { - pr_err("Error getting DIQ\n"); - pqm_destroy_queue(dbgdev->pqm, qid); - return -EFAULT; - } - - dbgdev->kq = kq; - - return status; -} - -static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev) -{ - /* disable watch address */ - 
dbgdev_address_watch_disable_nodiq(dbgdev->dev); - return 0; -} - -static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev) -{ - /* todo - disable address watch */ - int status; - - status = pqm_destroy_queue(dbgdev->pqm, - dbgdev->kq->queue->properties.queue_id); - dbgdev->kq = NULL; - - return status; -} - -static void dbgdev_address_watch_set_registers( - const struct dbg_address_watch_info *adw_info, - union TCP_WATCH_ADDR_H_BITS *addrHi, - union TCP_WATCH_ADDR_L_BITS *addrLo, - union TCP_WATCH_CNTL_BITS *cntl, - unsigned int index, unsigned int vmid) -{ - union ULARGE_INTEGER addr; - - addr.quad_part = 0; - addrHi->u32All = 0; - addrLo->u32All = 0; - cntl->u32All = 0; - - if (adw_info->watch_mask) - cntl->bitfields.mask = - (uint32_t) (adw_info->watch_mask[index] & - ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK); - else - cntl->bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK; - - addr.quad_part = (unsigned long long) adw_info->watch_address[index]; - - addrHi->bitfields.addr = addr.u.high_part & - ADDRESS_WATCH_REG_ADDHIGH_MASK; - addrLo->bitfields.addr = - (addr.u.low_part >> ADDRESS_WATCH_REG_ADDLOW_SHIFT); - - cntl->bitfields.mode = adw_info->watch_mode[index]; - cntl->bitfields.vmid = (uint32_t) vmid; - /* for now assume it is an ATC address */ - cntl->u32All |= ADDRESS_WATCH_REG_CNTL_ATC_BIT; - - pr_debug("\t\t%20s %08x\n", "set reg mask :", cntl->bitfields.mask); - pr_debug("\t\t%20s %08x\n", "set reg add high :", - addrHi->bitfields.addr); - pr_debug("\t\t%20s %08x\n", "set reg add low :", - addrLo->bitfields.addr); -} - -static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev, - struct dbg_address_watch_info *adw_info) -{ - union TCP_WATCH_ADDR_H_BITS addrHi; - union TCP_WATCH_ADDR_L_BITS addrLo; - union TCP_WATCH_CNTL_BITS cntl; - struct kfd_process_device *pdd; - unsigned int i; - - /* taking the vmid for that process on the safe way using pdd */ - pdd = kfd_get_process_device_data(dbgdev->dev, - adw_info->process); - if (!pdd) { - pr_err("Failed to get pdd for wave control no DIQ\n"); - return -EFAULT; - } - - addrHi.u32All = 0; - addrLo.u32All = 0; - cntl.u32All = 0; - - if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) || - (adw_info->num_watch_points == 0)) { - pr_err("num_watch_points is invalid\n"); - return -EINVAL; - } - - if (!adw_info->watch_mode || !adw_info->watch_address) { - pr_err("adw_info fields are not valid\n"); - return -EINVAL; - } - - for (i = 0; i < adw_info->num_watch_points; i++) { - dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo, - &cntl, i, pdd->qpd.vmid); - - pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); - pr_debug("\t\t%20s %08x\n", "register index :", i); - pr_debug("\t\t%20s %08x\n", "vmid is :", pdd->qpd.vmid); - pr_debug("\t\t%20s %08x\n", "Address Low is :", - addrLo.bitfields.addr); - pr_debug("\t\t%20s %08x\n", "Address high is :", - addrHi.bitfields.addr); - pr_debug("\t\t%20s %08x\n", "Address high is :", - addrHi.bitfields.addr); - pr_debug("\t\t%20s %08x\n", "Control Mask is :", - cntl.bitfields.mask); - pr_debug("\t\t%20s %08x\n", "Control Mode is :", - cntl.bitfields.mode); - pr_debug("\t\t%20s %08x\n", "Control Vmid is :", - cntl.bitfields.vmid); - pr_debug("\t\t%20s %08x\n", "Control atc is :", - cntl.bitfields.atc); - pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); - - pdd->dev->kfd2kgd->address_watch_execute( - dbgdev->dev->adev, - i, - cntl.u32All, - addrHi.u32All, - addrLo.u32All); - } - - return 0; -} - -static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, - 
struct dbg_address_watch_info *adw_info) -{ - struct pm4__set_config_reg *packets_vec; - union TCP_WATCH_ADDR_H_BITS addrHi; - union TCP_WATCH_ADDR_L_BITS addrLo; - union TCP_WATCH_CNTL_BITS cntl; - struct kfd_mem_obj *mem_obj; - unsigned int aw_reg_add_dword; - uint32_t *packet_buff_uint; - unsigned int i; - int status; - size_t ib_size = sizeof(struct pm4__set_config_reg) * 4; - /* we do not control the vmid in DIQ mode, just a place holder */ - unsigned int vmid = 0; - - addrHi.u32All = 0; - addrLo.u32All = 0; - cntl.u32All = 0; - - if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) || - (adw_info->num_watch_points == 0)) { - pr_err("num_watch_points is invalid\n"); - return -EINVAL; - } - - if (!adw_info->watch_mode || !adw_info->watch_address) { - pr_err("adw_info fields are not valid\n"); - return -EINVAL; - } - - status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj); - - if (status) { - pr_err("Failed to allocate GART memory\n"); - return status; - } - - packet_buff_uint = mem_obj->cpu_ptr; - - memset(packet_buff_uint, 0, ib_size); - - packets_vec = (struct pm4__set_config_reg *) (packet_buff_uint); - - packets_vec[0].header.count = 1; - packets_vec[0].header.opcode = IT_SET_CONFIG_REG; - packets_vec[0].header.type = PM4_TYPE_3; - packets_vec[0].bitfields2.vmid_shift = ADDRESS_WATCH_CNTL_OFFSET; - packets_vec[0].bitfields2.insert_vmid = 1; - packets_vec[1].ordinal1 = packets_vec[0].ordinal1; - packets_vec[1].bitfields2.insert_vmid = 0; - packets_vec[2].ordinal1 = packets_vec[0].ordinal1; - packets_vec[2].bitfields2.insert_vmid = 0; - packets_vec[3].ordinal1 = packets_vec[0].ordinal1; - packets_vec[3].bitfields2.vmid_shift = ADDRESS_WATCH_CNTL_OFFSET; - packets_vec[3].bitfields2.insert_vmid = 1; - - for (i = 0; i < adw_info->num_watch_points; i++) { - dbgdev_address_watch_set_registers(adw_info, - &addrHi, - &addrLo, - &cntl, - i, - vmid); - - pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); - pr_debug("\t\t%20s %08x\n", "register index :", i); - pr_debug("\t\t%20s %08x\n", "vmid is :", vmid); - pr_debug("\t\t%20s %p\n", "Add ptr is :", - adw_info->watch_address); - pr_debug("\t\t%20s %08llx\n", "Add is :", - adw_info->watch_address[i]); - pr_debug("\t\t%20s %08x\n", "Address Low is :", - addrLo.bitfields.addr); - pr_debug("\t\t%20s %08x\n", "Address high is :", - addrHi.bitfields.addr); - pr_debug("\t\t%20s %08x\n", "Control Mask is :", - cntl.bitfields.mask); - pr_debug("\t\t%20s %08x\n", "Control Mode is :", - cntl.bitfields.mode); - pr_debug("\t\t%20s %08x\n", "Control Vmid is :", - cntl.bitfields.vmid); - pr_debug("\t\t%20s %08x\n", "Control atc is :", - cntl.bitfields.atc); - pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); - - aw_reg_add_dword = - dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->adev, - i, - ADDRESS_WATCH_REG_CNTL); - - packets_vec[0].bitfields2.reg_offset = - aw_reg_add_dword - AMD_CONFIG_REG_BASE; - - packets_vec[0].reg_data[0] = cntl.u32All; - - aw_reg_add_dword = - dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->adev, - i, - ADDRESS_WATCH_REG_ADDR_HI); - - packets_vec[1].bitfields2.reg_offset = - aw_reg_add_dword - AMD_CONFIG_REG_BASE; - packets_vec[1].reg_data[0] = addrHi.u32All; - - aw_reg_add_dword = - dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->adev, - i, - ADDRESS_WATCH_REG_ADDR_LO); - - packets_vec[2].bitfields2.reg_offset = - aw_reg_add_dword - AMD_CONFIG_REG_BASE; - packets_vec[2].reg_data[0] = addrLo.u32All; - - /* enable watch flag if address is not zero*/ - if 
(adw_info->watch_address[i] > 0) - cntl.bitfields.valid = 1; - else - cntl.bitfields.valid = 0; - - aw_reg_add_dword = - dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->adev, - i, - ADDRESS_WATCH_REG_CNTL); - - packets_vec[3].bitfields2.reg_offset = - aw_reg_add_dword - AMD_CONFIG_REG_BASE; - packets_vec[3].reg_data[0] = cntl.u32All; - - status = dbgdev_diq_submit_ib( - dbgdev, - adw_info->process->pasid, - mem_obj->gpu_addr, - packet_buff_uint, - ib_size); - - if (status) { - pr_err("Failed to submit IB to DIQ\n"); - break; - } - } - - kfd_gtt_sa_free(dbgdev->dev, mem_obj); - return status; -} - -static int dbgdev_wave_control_set_registers( - struct dbg_wave_control_info *wac_info, - union SQ_CMD_BITS *in_reg_sq_cmd, - union GRBM_GFX_INDEX_BITS *in_reg_gfx_index) -{ - int status = 0; - union SQ_CMD_BITS reg_sq_cmd; - union GRBM_GFX_INDEX_BITS reg_gfx_index; - struct HsaDbgWaveMsgAMDGen2 *pMsg; - - reg_sq_cmd.u32All = 0; - reg_gfx_index.u32All = 0; - pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2; - - switch (wac_info->mode) { - /* Send command to single wave */ - case HSA_DBG_WAVEMODE_SINGLE: - /* - * Limit access to the process waves only, - * by setting vmid check - */ - reg_sq_cmd.bits.check_vmid = 1; - reg_sq_cmd.bits.simd_id = pMsg->ui32.SIMD; - reg_sq_cmd.bits.wave_id = pMsg->ui32.WaveId; - reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_SINGLE; - - reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray; - reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine; - reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU; - - break; - - /* Send command to all waves with matching VMID */ - case HSA_DBG_WAVEMODE_BROADCAST_PROCESS: - - reg_gfx_index.bits.sh_broadcast_writes = 1; - reg_gfx_index.bits.se_broadcast_writes = 1; - reg_gfx_index.bits.instance_broadcast_writes = 1; - - reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST; - - break; - - /* Send command to all CU waves with matching VMID */ - case HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU: - - reg_sq_cmd.bits.check_vmid = 1; - reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST; - - reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray; - reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine; - reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU; - - break; - - default: - return -EINVAL; - } - - switch (wac_info->operand) { - case HSA_DBG_WAVEOP_HALT: - reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_HALT; - break; - - case HSA_DBG_WAVEOP_RESUME: - reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_RESUME; - break; - - case HSA_DBG_WAVEOP_KILL: - reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; - break; - - case HSA_DBG_WAVEOP_DEBUG: - reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_DEBUG; - break; - - case HSA_DBG_WAVEOP_TRAP: - if (wac_info->trapId < MAX_TRAPID) { - reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_TRAP; - reg_sq_cmd.bits.trap_id = wac_info->trapId; - } else { - status = -EINVAL; - } - break; - - default: - status = -EINVAL; - break; - } - - if (status == 0) { - *in_reg_sq_cmd = reg_sq_cmd; - *in_reg_gfx_index = reg_gfx_index; - } - - return status; -} - -static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev, - struct dbg_wave_control_info *wac_info) -{ - - int status; - union SQ_CMD_BITS reg_sq_cmd; - union GRBM_GFX_INDEX_BITS reg_gfx_index; - struct kfd_mem_obj *mem_obj; - uint32_t *packet_buff_uint; - struct pm4__set_config_reg *packets_vec; - size_t ib_size = sizeof(struct pm4__set_config_reg) * 3; - - reg_sq_cmd.u32All = 0; - - status = dbgdev_wave_control_set_registers(wac_info, ®_sq_cmd, - ®_gfx_index); - if (status) { - pr_err("Failed to set wave 
control registers\n"); - return status; - } - - /* we do not control the VMID in DIQ, so reset it to a known value */ - reg_sq_cmd.bits.vm_id = 0; - - pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - - pr_debug("\t\t mode is: %u\n", wac_info->mode); - pr_debug("\t\t operand is: %u\n", wac_info->operand); - pr_debug("\t\t trap id is: %u\n", wac_info->trapId); - pr_debug("\t\t msg value is: %u\n", - wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value); - pr_debug("\t\t vmid is: N/A\n"); - - pr_debug("\t\t chk_vmid is : %u\n", reg_sq_cmd.bitfields.check_vmid); - pr_debug("\t\t command is : %u\n", reg_sq_cmd.bitfields.cmd); - pr_debug("\t\t queue id is : %u\n", reg_sq_cmd.bitfields.queue_id); - pr_debug("\t\t simd id is : %u\n", reg_sq_cmd.bitfields.simd_id); - pr_debug("\t\t mode is : %u\n", reg_sq_cmd.bitfields.mode); - pr_debug("\t\t vm_id is : %u\n", reg_sq_cmd.bitfields.vm_id); - pr_debug("\t\t wave_id is : %u\n", reg_sq_cmd.bitfields.wave_id); - - pr_debug("\t\t ibw is : %u\n", - reg_gfx_index.bitfields.instance_broadcast_writes); - pr_debug("\t\t ii is : %u\n", - reg_gfx_index.bitfields.instance_index); - pr_debug("\t\t sebw is : %u\n", - reg_gfx_index.bitfields.se_broadcast_writes); - pr_debug("\t\t se_ind is : %u\n", reg_gfx_index.bitfields.se_index); - pr_debug("\t\t sh_ind is : %u\n", reg_gfx_index.bitfields.sh_index); - pr_debug("\t\t sbw is : %u\n", - reg_gfx_index.bitfields.sh_broadcast_writes); - - pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - - status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj); - - if (status != 0) { - pr_err("Failed to allocate GART memory\n"); - return status; - } - - packet_buff_uint = mem_obj->cpu_ptr; - - memset(packet_buff_uint, 0, ib_size); - - packets_vec = (struct pm4__set_config_reg *) packet_buff_uint; - packets_vec[0].header.count = 1; - packets_vec[0].header.opcode = IT_SET_UCONFIG_REG; - packets_vec[0].header.type = PM4_TYPE_3; - packets_vec[0].bitfields2.reg_offset = - GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE; - - packets_vec[0].bitfields2.insert_vmid = 0; - packets_vec[0].reg_data[0] = reg_gfx_index.u32All; - - packets_vec[1].header.count = 1; - packets_vec[1].header.opcode = IT_SET_CONFIG_REG; - packets_vec[1].header.type = PM4_TYPE_3; - packets_vec[1].bitfields2.reg_offset = SQ_CMD / 4 - AMD_CONFIG_REG_BASE; - - packets_vec[1].bitfields2.vmid_shift = SQ_CMD_VMID_OFFSET; - packets_vec[1].bitfields2.insert_vmid = 1; - packets_vec[1].reg_data[0] = reg_sq_cmd.u32All; - - /* Restore the GRBM_GFX_INDEX register */ - - reg_gfx_index.u32All = 0; - reg_gfx_index.bits.sh_broadcast_writes = 1; - reg_gfx_index.bits.instance_broadcast_writes = 1; - reg_gfx_index.bits.se_broadcast_writes = 1; - - - packets_vec[2].ordinal1 = packets_vec[0].ordinal1; - packets_vec[2].bitfields2.reg_offset = - GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE; - - packets_vec[2].bitfields2.insert_vmid = 0; - packets_vec[2].reg_data[0] = reg_gfx_index.u32All; - - status = dbgdev_diq_submit_ib( - dbgdev, - wac_info->process->pasid, - mem_obj->gpu_addr, - packet_buff_uint, - ib_size); - - if (status) - pr_err("Failed to submit IB to DIQ\n"); - - kfd_gtt_sa_free(dbgdev->dev, mem_obj); - - return status; -} - -static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev, - struct dbg_wave_control_info *wac_info) -{ - int status; - union SQ_CMD_BITS reg_sq_cmd; - union GRBM_GFX_INDEX_BITS reg_gfx_index; - struct kfd_process_device *pdd; - - reg_sq_cmd.u32All = 0; - - /* taking the VMID for that process on the safe way using PDD */ - pdd = 
kfd_get_process_device_data(dbgdev->dev, wac_info->process); - - if (!pdd) { - pr_err("Failed to get pdd for wave control no DIQ\n"); - return -EFAULT; - } - status = dbgdev_wave_control_set_registers(wac_info, ®_sq_cmd, - ®_gfx_index); - if (status) { - pr_err("Failed to set wave control registers\n"); - return status; - } - - /* for non DIQ we need to patch the VMID: */ - - reg_sq_cmd.bits.vm_id = pdd->qpd.vmid; - - pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - - pr_debug("\t\t mode is: %u\n", wac_info->mode); - pr_debug("\t\t operand is: %u\n", wac_info->operand); - pr_debug("\t\t trap id is: %u\n", wac_info->trapId); - pr_debug("\t\t msg value is: %u\n", - wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value); - pr_debug("\t\t vmid is: %u\n", pdd->qpd.vmid); - - pr_debug("\t\t chk_vmid is : %u\n", reg_sq_cmd.bitfields.check_vmid); - pr_debug("\t\t command is : %u\n", reg_sq_cmd.bitfields.cmd); - pr_debug("\t\t queue id is : %u\n", reg_sq_cmd.bitfields.queue_id); - pr_debug("\t\t simd id is : %u\n", reg_sq_cmd.bitfields.simd_id); - pr_debug("\t\t mode is : %u\n", reg_sq_cmd.bitfields.mode); - pr_debug("\t\t vm_id is : %u\n", reg_sq_cmd.bitfields.vm_id); - pr_debug("\t\t wave_id is : %u\n", reg_sq_cmd.bitfields.wave_id); - - pr_debug("\t\t ibw is : %u\n", - reg_gfx_index.bitfields.instance_broadcast_writes); - pr_debug("\t\t ii is : %u\n", - reg_gfx_index.bitfields.instance_index); - pr_debug("\t\t sebw is : %u\n", - reg_gfx_index.bitfields.se_broadcast_writes); - pr_debug("\t\t se_ind is : %u\n", reg_gfx_index.bitfields.se_index); - pr_debug("\t\t sh_ind is : %u\n", reg_gfx_index.bitfields.sh_index); - pr_debug("\t\t sbw is : %u\n", - reg_gfx_index.bitfields.sh_broadcast_writes); - - pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - - return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev, - reg_gfx_index.u32All, - reg_sq_cmd.u32All); -} - -int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) -{ - int status = 0; - unsigned int vmid; - uint16_t queried_pasid; - union SQ_CMD_BITS reg_sq_cmd; - union GRBM_GFX_INDEX_BITS reg_gfx_index; - struct kfd_process_device *pdd; - struct dbg_wave_control_info wac_info; - int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; - int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; - - reg_sq_cmd.u32All = 0; - status = 0; - - wac_info.mode = HSA_DBG_WAVEMODE_BROADCAST_PROCESS; - wac_info.operand = HSA_DBG_WAVEOP_KILL; - - pr_debug("Killing all process wavefronts\n"); - - /* Scan all registers in the range ATC_VMID8_PASID_MAPPING .. - * ATC_VMID15_PASID_MAPPING - * to check which VMID the current process is mapped to. 
- */
-
- for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
- (dev->adev, vmid, &queried_pasid);
-
- if (status && queried_pasid == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
- vmid, p->pasid);
- break;
- }
- }
-
- if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
- return -EFAULT;
- }
-
- /* taking the VMID for that process on the safe way using PDD */
- pdd = kfd_get_process_device_data(dev, p);
- if (!pdd)
- return -EFAULT;
-
- status = dbgdev_wave_control_set_registers(&wac_info, &reg_sq_cmd,
- &reg_gfx_index);
- if (status != 0)
- return -EINVAL;
-
- /* for non DIQ we need to patch the VMID: */
- reg_sq_cmd.bits.vm_id = vmid;
-
- dev->kfd2kgd->wave_control_execute(dev->adev,
- reg_gfx_index.u32All,
- reg_sq_cmd.u32All);
-
- return 0;
-}
-
-void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
- enum DBGDEV_TYPE type)
-{
- pdbgdev->dev = pdev;
- pdbgdev->kq = NULL;
- pdbgdev->type = type;
- pdbgdev->pqm = NULL;
-
- switch (type) {
- case DBGDEV_TYPE_NODIQ:
- pdbgdev->dbgdev_register = dbgdev_register_nodiq;
- pdbgdev->dbgdev_unregister = dbgdev_unregister_nodiq;
- pdbgdev->dbgdev_wave_control = dbgdev_wave_control_nodiq;
- pdbgdev->dbgdev_address_watch = dbgdev_address_watch_nodiq;
- break;
- case DBGDEV_TYPE_DIQ:
- default:
- pdbgdev->dbgdev_register = dbgdev_register_diq;
- pdbgdev->dbgdev_unregister = dbgdev_unregister_diq;
- pdbgdev->dbgdev_wave_control = dbgdev_wave_control_diq;
- pdbgdev->dbgdev_address_watch = dbgdev_address_watch_diq;
- break;
- }
-
-} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h deleted file mode 100644 index 0619c777b47e..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h +++ /dev/null @@ -1,230 +0,0 @@ -/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */ - -#ifndef KFD_DBGDEV_H_ -#define KFD_DBGDEV_H_ - -enum { - SQ_CMD_VMID_OFFSET = 28, - ADDRESS_WATCH_CNTL_OFFSET = 24 -}; - -enum { - PRIV_QUEUE_SYNC_TIME_MS = 200 -}; - -/* CONTEXT reg space definition */ -enum { - CONTEXT_REG_BASE = 0xA000, - CONTEXT_REG_END = 0xA400, - CONTEXT_REG_SIZE = CONTEXT_REG_END - CONTEXT_REG_BASE -}; - -/* USER CONFIG reg space definition */ -enum { - USERCONFIG_REG_BASE = 0xC000, - USERCONFIG_REG_END = 0x10000, - USERCONFIG_REG_SIZE = USERCONFIG_REG_END - USERCONFIG_REG_BASE -}; - -/* CONFIG reg space definition */ -enum { - AMD_CONFIG_REG_BASE = 0x2000, /* in dwords */ - AMD_CONFIG_REG_END = 0x2B00, - AMD_CONFIG_REG_SIZE = AMD_CONFIG_REG_END - AMD_CONFIG_REG_BASE -}; - -/* SH reg space definition */ -enum { - SH_REG_BASE = 0x2C00, - SH_REG_END = 0x3000, - SH_REG_SIZE = SH_REG_END - SH_REG_BASE -}; - -/* SQ_CMD definitions */ -#define SQ_CMD 0x8DEC - -enum SQ_IND_CMD_CMD { - SQ_IND_CMD_CMD_NULL = 0x00000000, - SQ_IND_CMD_CMD_HALT = 0x00000001, - SQ_IND_CMD_CMD_RESUME = 0x00000002, - SQ_IND_CMD_CMD_KILL = 0x00000003, - SQ_IND_CMD_CMD_DEBUG = 0x00000004, - SQ_IND_CMD_CMD_TRAP = 0x00000005, -}; - -enum SQ_IND_CMD_MODE { - SQ_IND_CMD_MODE_SINGLE = 0x00000000, - SQ_IND_CMD_MODE_BROADCAST = 0x00000001, - SQ_IND_CMD_MODE_BROADCAST_QUEUE = 0x00000002, - SQ_IND_CMD_MODE_BROADCAST_PIPE = 0x00000003, - SQ_IND_CMD_MODE_BROADCAST_ME = 0x00000004, -}; - -union SQ_IND_INDEX_BITS { - struct { - uint32_t wave_id:4; - uint32_t simd_id:2; - uint32_t thread_id:6; - uint32_t:1; - uint32_t force_read:1; - uint32_t read_timeout:1; - uint32_t unindexed:1; - uint32_t index:16; - - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -union SQ_IND_CMD_BITS { - struct { - uint32_t data:32; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -union SQ_CMD_BITS { - struct { - uint32_t cmd:3; - uint32_t:1; - uint32_t mode:3; - uint32_t check_vmid:1; - uint32_t trap_id:3; - uint32_t:5; - uint32_t wave_id:4; - uint32_t simd_id:2; - uint32_t:2; - uint32_t queue_id:3; - uint32_t:1; - uint32_t vm_id:4; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -union SQ_IND_DATA_BITS { - struct { - uint32_t data:32; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -union GRBM_GFX_INDEX_BITS { - struct { - uint32_t instance_index:8; - uint32_t sh_index:8; - uint32_t se_index:8; - uint32_t:5; - uint32_t sh_broadcast_writes:1; - uint32_t instance_broadcast_writes:1; - uint32_t se_broadcast_writes:1; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -union TCP_WATCH_ADDR_H_BITS { - struct { - uint32_t addr:16; - uint32_t:16; - - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -union TCP_WATCH_ADDR_L_BITS { - struct { - uint32_t:6; - uint32_t addr:26; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -enum { - QUEUESTATE__INVALID = 0, /* so by default we'll get invalid state */ - QUEUESTATE__ACTIVE_COMPLETION_PENDING, - QUEUESTATE__ACTIVE -}; - -union ULARGE_INTEGER { - struct { - uint32_t low_part; - uint32_t high_part; - } u; - unsigned long long quad_part; -}; - - -#define KFD_CIK_VMID_START_OFFSET (8) -#define KFD_CIK_VMID_END_OFFSET (KFD_CIK_VMID_START_OFFSET + (8)) - - -void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev, - enum DBGDEV_TYPE type); - -union TCP_WATCH_CNTL_BITS { - struct { - uint32_t mask:24; - uint32_t vmid:4; - uint32_t atc:1; - uint32_t 
mode:2; - uint32_t valid:1; - } bitfields, bits; - uint32_t u32All; - signed int i32All; - float f32All; -}; - -enum { - ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL, - ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF, - ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000, - /* extend the mask to 26 bits in order to match the low address field */ - ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6, - ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF -}; - -enum { - MAX_TRAPID = 8, /* 3 bits in the bitfield. */ - MAX_WATCH_ADDRESSES = 4 -}; - -enum { - ADDRESS_WATCH_REG_ADDR_HI = 0, - ADDRESS_WATCH_REG_ADDR_LO, - ADDRESS_WATCH_REG_CNTL, - ADDRESS_WATCH_REG_MAX -}; - -#endif /* KFD_DBGDEV_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c deleted file mode 100644 index 9bfa50633654..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include -#include -#include -#include -#include -#include - -#include "kfd_priv.h" -#include "cik_regs.h" -#include "kfd_pm4_headers.h" -#include "kfd_pm4_headers_diq.h" -#include "kfd_dbgmgr.h" -#include "kfd_dbgdev.h" -#include "kfd_device_queue_manager.h" - -static DEFINE_MUTEX(kfd_dbgmgr_mutex); - -struct mutex *kfd_get_dbgmgr_mutex(void) -{ - return &kfd_dbgmgr_mutex; -} - - -static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr) -{ - kfree(pmgr->dbgdev); - - pmgr->dbgdev = NULL; - pmgr->pasid = 0; - pmgr->dev = NULL; -} - -void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr) -{ - if (pmgr) { - kfd_dbgmgr_uninitialize(pmgr); - kfree(pmgr); - } -} - -bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) -{ - enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ; - struct kfd_dbgmgr *new_buff; - - if (WARN_ON(!pdev->init_complete)) - return false; - - new_buff = kfd_alloc_struct(new_buff); - if (!new_buff) { - pr_err("Failed to allocate dbgmgr instance\n"); - return false; - } - - new_buff->pasid = 0; - new_buff->dev = pdev; - new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev); - if (!new_buff->dbgdev) { - pr_err("Failed to allocate dbgdev instance\n"); - kfree(new_buff); - return false; - } - - /* get actual type of DBGDevice cpsch or not */ - if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) - type = DBGDEV_TYPE_NODIQ; - - kfd_dbgdev_init(new_buff->dbgdev, pdev, type); - *ppmgr = new_buff; - - return true; -} - -long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p) -{ - if (pmgr->pasid != 0) { - pr_debug("H/W debugger is already active using pasid 0x%x\n", - pmgr->pasid); - return -EBUSY; - } - - /* remember pasid */ - pmgr->pasid = p->pasid; - - /* provide the pqm for diq generation */ - pmgr->dbgdev->pqm = &p->pqm; - - /* activate the actual registering */ - pmgr->dbgdev->dbgdev_register(pmgr->dbgdev); - - return 0; -} - -long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p) -{ - /* Is the requests coming from the already registered process? */ - if (pmgr->pasid != p->pasid) { - pr_debug("H/W debugger is not registered by calling pasid 0x%x\n", - p->pasid); - return -EINVAL; - } - - pmgr->dbgdev->dbgdev_unregister(pmgr->dbgdev); - - pmgr->pasid = 0; - - return 0; -} - -long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, - struct dbg_wave_control_info *wac_info) -{ - /* Is the requests coming from the already registered process? */ - if (pmgr->pasid != wac_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", - wac_info->process->pasid); - return -EINVAL; - } - - return (long) pmgr->dbgdev->dbgdev_wave_control(pmgr->dbgdev, wac_info); -} - -long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr, - struct dbg_address_watch_info *adw_info) -{ - /* Is the requests coming from the already registered process? */ - if (pmgr->pasid != adw_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", - adw_info->process->pasid); - return -EINVAL; - } - - return (long) pmgr->dbgdev->dbgdev_address_watch(pmgr->dbgdev, - adw_info); -} - diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h deleted file mode 100644 index f9c6df1fdc5c..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef KFD_DBGMGR_H_ -#define KFD_DBGMGR_H_ - -#include "kfd_priv.h" - -/* must align with hsakmttypes definition */ -#pragma pack(push, 4) - -enum HSA_DBG_WAVEOP { - HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */ - HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */ - HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */ - HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter dbg mode */ - HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take a trap */ - HSA_DBG_NUM_WAVEOP = 5, - HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF -}; - -enum HSA_DBG_WAVEMODE { - /* send command to a single wave */ - HSA_DBG_WAVEMODE_SINGLE = 0, - /* - * Broadcast to all wavefronts of all processes is not - * supported for HSA user mode - */ - - /* send to waves within current process */ - HSA_DBG_WAVEMODE_BROADCAST_PROCESS = 2, - /* send to waves within current process on CU */ - HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU = 3, - HSA_DBG_NUM_WAVEMODE = 3, - HSA_DBG_MAX_WAVEMODE = 0xFFFFFFFF -}; - -enum HSA_DBG_WAVEMSG_TYPE { - HSA_DBG_WAVEMSG_AUTO = 0, - HSA_DBG_WAVEMSG_USER = 1, - HSA_DBG_WAVEMSG_ERROR = 2, - HSA_DBG_NUM_WAVEMSG, - HSA_DBG_MAX_WAVEMSG = 0xFFFFFFFF -}; - -enum HSA_DBG_WATCH_MODE { - HSA_DBG_WATCH_READ = 0, /* Read operations only */ - HSA_DBG_WATCH_NONREAD = 1, /* Write or Atomic operations only */ - HSA_DBG_WATCH_ATOMIC = 2, /* Atomic Operations only */ - HSA_DBG_WATCH_ALL = 3, /* Read, Write or Atomic operations */ - HSA_DBG_WATCH_NUM, - HSA_DBG_WATCH_SIZE = 0xFFFFFFFF -}; - -/* This structure is hardware specific and may change in the future */ -struct HsaDbgWaveMsgAMDGen2 { - union { - struct ui32 { - uint32_t UserData:8; /* user data */ - uint32_t ShaderArray:1; /* Shader array */ - uint32_t Priv:1; /* Privileged */ - uint32_t Reserved0:4; /* Reserved, should be 0 */ - uint32_t WaveId:4; /* wave id */ - uint32_t SIMD:2; /* SIMD id */ - uint32_t HSACU:4; /* Compute unit */ - uint32_t ShaderEngine:2;/* Shader engine */ - uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */ - uint32_t Reserved1:4; /* Reserved, should be 0 */ - } ui32; - uint32_t Value; - }; - uint32_t Reserved2; -}; - -union HsaDbgWaveMessageAMD { - struct HsaDbgWaveMsgAMDGen2 WaveMsgInfoGen2; - /* for future HsaDbgWaveMsgAMDGen3; */ -}; - -struct HsaDbgWaveMessage { - void *MemoryVA; /* ptr to associated host-accessible data */ - union HsaDbgWaveMessageAMD DbgWaveMsg; -}; - -/* - * TODO: This definitions to be MOVED to kfd_event, once it is implemented. 
- * - * HSA sync primitive, Event and HW Exception notification API definitions. - * The API functions allow the runtime to define a so-called sync-primitive, - * a SW object combining a user-mode provided "syncvar" and a scheduler event - * that can be signaled through a defined GPU interrupt. A syncvar is - * a process virtual memory location of a certain size that can be accessed - * by CPU and GPU shader code within the process to set and query the content - * within that memory. The definition of the content is determined by the HSA - * runtime and potentially GPU shader code interfacing with the HSA runtime. - * The syncvar values may be commonly written through an PM4 WRITE_DATA packet - * in the user mode instruction stream. The OS scheduler event is typically - * associated and signaled by an interrupt issued by the GPU, but other HSA - * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced - * by the KFD by this mechanism, too. - */ - -/* these are the new definitions for events */ -enum HSA_EVENTTYPE { - HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */ - HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */ - HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change - * (start/stop) - */ - HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */ - HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */ - HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */ - HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */ - HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state - * (EOP pm4) - */ - /* ... */ - HSA_EVENTTYPE_MAXID, - HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF -}; - -/* Sub-definitions for various event types: Syncvar */ -struct HsaSyncVar { - union SyncVar { - void *UserData; /* pointer to user mode data */ - uint64_t UserDataPtrValue; /* 64bit compatibility of value */ - } SyncVar; - uint64_t SyncVarSize; -}; - -/* Sub-definitions for various event types: NodeChange */ - -enum HSA_EVENTTYPE_NODECHANGE_FLAGS { - HSA_EVENTTYPE_NODECHANGE_ADD = 0, - HSA_EVENTTYPE_NODECHANGE_REMOVE = 1, - HSA_EVENTTYPE_NODECHANGE_SIZE = 0xFFFFFFFF -}; - -struct HsaNodeChange { - /* HSA node added/removed on the platform */ - enum HSA_EVENTTYPE_NODECHANGE_FLAGS Flags; -}; - -/* Sub-definitions for various event types: DeviceStateChange */ -enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS { - /* device started (and available) */ - HSA_EVENTTYPE_DEVICESTATUSCHANGE_START = 0, - /* device stopped (i.e. 
unavailable) */ - HSA_EVENTTYPE_DEVICESTATUSCHANGE_STOP = 1, - HSA_EVENTTYPE_DEVICESTATUSCHANGE_SIZE = 0xFFFFFFFF -}; - -enum HSA_DEVICE { - HSA_DEVICE_CPU = 0, - HSA_DEVICE_GPU = 1, - MAX_HSA_DEVICE = 2 -}; - -struct HsaDeviceStateChange { - uint32_t NodeId; /* F-NUMA node that contains the device */ - enum HSA_DEVICE Device; /* device type: GPU or CPU */ - enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS Flags; /* event flags */ -}; - -struct HsaEventData { - enum HSA_EVENTTYPE EventType; /* event type */ - union EventData { - /* - * return data associated with HSA_EVENTTYPE_SIGNAL - * and other events - */ - struct HsaSyncVar SyncVar; - - /* data associated with HSA_EVENTTYPE_NODE_CHANGE */ - struct HsaNodeChange NodeChangeState; - - /* data associated with HSA_EVENTTYPE_DEVICE_STATE_CHANGE */ - struct HsaDeviceStateChange DeviceState; - } EventData; - - /* the following data entries are internal to the KFD & thunk itself */ - - /* internal thunk store for Event data (OsEventHandle) */ - uint64_t HWData1; - /* internal thunk store for Event data (HWAddress) */ - uint64_t HWData2; - /* internal thunk store for Event data (HWData) */ - uint32_t HWData3; -}; - -struct HsaEventDescriptor { - /* event type to allocate */ - enum HSA_EVENTTYPE EventType; - /* H-NUMA node containing GPU device that is event source */ - uint32_t NodeId; - /* pointer to user mode syncvar data, syncvar->UserDataPtrValue - * may be NULL - */ - struct HsaSyncVar SyncVar; -}; - -struct HsaEvent { - uint32_t EventId; - struct HsaEventData EventData; -}; - -#pragma pack(pop) - -enum DBGDEV_TYPE { - DBGDEV_TYPE_ILLEGAL = 0, - DBGDEV_TYPE_NODIQ = 1, - DBGDEV_TYPE_DIQ = 2, - DBGDEV_TYPE_TEST = 3 -}; - -struct dbg_address_watch_info { - struct kfd_process *process; - enum HSA_DBG_WATCH_MODE *watch_mode; - uint64_t *watch_address; - uint64_t *watch_mask; - struct HsaEvent *watch_event; - uint32_t num_watch_points; -}; - -struct dbg_wave_control_info { - struct kfd_process *process; - uint32_t trapId; - enum HSA_DBG_WAVEOP operand; - enum HSA_DBG_WAVEMODE mode; - struct HsaDbgWaveMessage dbgWave_msg; -}; - -struct kfd_dbgdev { - - /* The device that owns this data. */ - struct kfd_dev *dev; - - /* kernel queue for DIQ */ - struct kernel_queue *kq; - - /* a pointer to the pqm of the calling process */ - struct process_queue_manager *pqm; - - /* type of debug device ( DIQ, non DIQ, etc. 
) */ - enum DBGDEV_TYPE type; - - /* virtualized function pointers to device dbg */ - int (*dbgdev_register)(struct kfd_dbgdev *dbgdev); - int (*dbgdev_unregister)(struct kfd_dbgdev *dbgdev); - int (*dbgdev_address_watch)(struct kfd_dbgdev *dbgdev, - struct dbg_address_watch_info *adw_info); - int (*dbgdev_wave_control)(struct kfd_dbgdev *dbgdev, - struct dbg_wave_control_info *wac_info); - -}; - -struct kfd_dbgmgr { - u32 pasid; - struct kfd_dev *dev; - struct kfd_dbgdev *dbgdev; -}; - -/* prototypes for debug manager functions */ -struct mutex *kfd_get_dbgmgr_mutex(void); -void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr); -bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev); -long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p); -long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p); -long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, - struct dbg_wave_control_info *wac_info); -long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr, - struct dbg_address_watch_info *adw_info); -#endif /* KFD_DBGMGR_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 5a47f437b455..dbb877fba724 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -575,8 +575,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd_resume(kfd)) goto kfd_resume_error; - kfd->dbgmgr = NULL; - if (kfd_topology_add_device(kfd)) { dev_err(kfd_device, "Error adding device to topology\n"); goto kfd_topology_add_device_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 219acc818eb4..7f6f1a842b0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -480,6 +480,65 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm, dqm->allocated_queues[q->pipe] |= (1 << q->queue); } +#define SQ_IND_CMD_CMD_KILL 0x00000003 +#define SQ_IND_CMD_MODE_BROADCAST 0x00000001 + +static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) +{ + int status = 0; + unsigned int vmid; + uint16_t queried_pasid; + union SQ_CMD_BITS reg_sq_cmd; + union GRBM_GFX_INDEX_BITS reg_gfx_index; + struct kfd_process_device *pdd; + int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; + int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; + + reg_sq_cmd.u32All = 0; + reg_gfx_index.u32All = 0; + + pr_debug("Killing all process wavefronts\n"); + + /* Scan all registers in the range ATC_VMID8_PASID_MAPPING .. + * ATC_VMID15_PASID_MAPPING + * to check which VMID the current process is mapped to. 
+ */ + + for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { + status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info + (dev->adev, vmid, &queried_pasid); + + if (status && queried_pasid == p->pasid) { + pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", + vmid, p->pasid); + break; + } + } + + if (vmid > last_vmid_to_scan) { + pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid); + return -EFAULT; + } + + /* taking the VMID for that process on the safe way using PDD */ + pdd = kfd_get_process_device_data(dev, p); + if (!pdd) + return -EFAULT; + + reg_gfx_index.bits.sh_broadcast_writes = 1; + reg_gfx_index.bits.se_broadcast_writes = 1; + reg_gfx_index.bits.instance_broadcast_writes = 1; + reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST; + reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; + reg_sq_cmd.bits.vm_id = vmid; + + dev->kfd2kgd->wave_control_execute(dev->adev, + reg_gfx_index.u32All, + reg_sq_cmd.u32All); + + return 0; +} + /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked * to avoid asynchronized access */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a7d2e3323977..05a9c17daa3e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -39,6 +39,41 @@ struct device_process_node { struct list_head list; }; +union SQ_CMD_BITS { + struct { + uint32_t cmd:3; + uint32_t:1; + uint32_t mode:3; + uint32_t check_vmid:1; + uint32_t trap_id:3; + uint32_t:5; + uint32_t wave_id:4; + uint32_t simd_id:2; + uint32_t:2; + uint32_t queue_id:3; + uint32_t:1; + uint32_t vm_id:4; + } bitfields, bits; + uint32_t u32All; + signed int i32All; + float f32All; +}; + +union GRBM_GFX_INDEX_BITS { + struct { + uint32_t instance_index:8; + uint32_t sh_index:8; + uint32_t se_index:8; + uint32_t:5; + uint32_t sh_broadcast_writes:1; + uint32_t instance_broadcast_writes:1; + uint32_t se_broadcast_writes:1; + } bitfields, bits; + uint32_t u32All; + signed int i32All; + float f32All; +}; + /** * struct device_queue_manager_ops * diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 66ad8d0b8f7f..fe62407dacb2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -30,7 +30,6 @@ #include #include #include "kfd_priv.h" -#include "kfd_dbgmgr.h" #include "kfd_topology.h" #include "kfd_iommu.h" @@ -163,17 +162,6 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) pr_debug("Unbinding process 0x%x from IOMMU\n", pasid); - mutex_lock(kfd_get_dbgmgr_mutex()); - - if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { - if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { - kfd_dbgmgr_destroy(dev->dbgmgr); - dev->dbgmgr = NULL; - } - } - - mutex_unlock(kfd_get_dbgmgr_mutex()); - mutex_lock(&p->mutex); pdd = kfd_get_process_device_data(dev, p); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 74fb4762c66e..b6790a637f5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -300,9 +300,6 @@ struct kfd_dev { */ bool interrupts_active; - /* Debug manager */ - struct kfd_dbgmgr *dbgmgr; - /* Firmware versions */ uint16_t mec_fw_version; uint16_t mec2_fw_version; @@ -1331,8 +1328,6 @@ void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid); void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); -int 
dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p); - bool kfd_is_locked(void); /* Compute profile */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 8e2780d2f735..8c6a48add76e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -40,7 +40,6 @@ struct mm_struct; #include "kfd_priv.h" #include "kfd_device_queue_manager.h" -#include "kfd_dbgmgr.h" #include "kfd_iommu.h" #include "kfd_svm.h" @@ -1158,7 +1157,6 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct kfd_process *p; - int i; /* * The kfd_process structure can not be free because the @@ -1178,23 +1176,6 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, mutex_lock(&p->mutex); - /* Iterate over all process device data structures and if the - * pdd is in debug mode, we should first force unregistration, - * then we will be able to destroy the queues - */ - for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; - - mutex_lock(kfd_get_dbgmgr_mutex()); - if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) { - if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) { - kfd_dbgmgr_destroy(dev->dbgmgr); - dev->dbgmgr = NULL; - } - } - mutex_unlock(kfd_get_dbgmgr_mutex()); - } - kfd_process_dequeue_from_all_devices(p); pqm_uninit(&p->pqm); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index ac941f62cbed..2f60cf35a444 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -269,18 +269,9 @@ struct kfd2kgd_calls { int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd, unsigned int timeout); - int (*address_watch_disable)(struct amdgpu_device *adev); - int (*address_watch_execute)(struct amdgpu_device *adev, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); int (*wave_control_execute)(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); - uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev, - unsigned int watch_point_id, - unsigned int reg_offset); bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index e6a56c146920..6e4268f5e482 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -756,16 +756,16 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_WAIT_EVENTS \ AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args) -#define AMDKFD_IOC_DBG_REGISTER \ +#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \ AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args) -#define AMDKFD_IOC_DBG_UNREGISTER \ +#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \ AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args) -#define AMDKFD_IOC_DBG_ADDRESS_WATCH \ +#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \ AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args) -#define AMDKFD_IOC_DBG_WAVE_CONTROL \ +#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \ AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args) #define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \ -- cgit v1.2.3-71-gd317 From 5cad527d5ffa9a1c4731bb9c97d2ee93f8960d50 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Wed, 9 Feb 2022 14:08:38 +0800 Subject: net: drop_monitor: support drop reason In the commit c504e5c2f964 ("net: skb: introduce 
kfree_skb_reason()") drop reason is introduced to the tracepoint of kfree_skb. Therefore, drop_monitor is able to report the drop reason to users by netlink. The drop reasons are reported as string to users, which is exactly the same as what we do when reporting it to ftrace. Signed-off-by: Menglong Dong Reviewed-by: Ido Schimmel Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20220209060838.55513-1-imagedong@tencent.com Signed-off-by: Jakub Kicinski --- include/uapi/linux/net_dropmon.h | 1 + net/core/drop_monitor.c | 41 ++++++++++++++++++++++++++++++++++------ 2 files changed, 36 insertions(+), 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 66048cc5d7b3..1bbea8f0681e 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -93,6 +93,7 @@ enum net_dm_attr { NET_DM_ATTR_SW_DROPS, /* flag */ NET_DM_ATTR_HW_DROPS, /* flag */ NET_DM_ATTR_FLOW_ACTION_COOKIE, /* binary */ + NET_DM_ATTR_REASON, /* string */ __NET_DM_ATTR_MAX, NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 7b288a121a41..4641126b8a20 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -48,6 +48,19 @@ static int trace_state = TRACE_OFF; static bool monitor_hw; +#undef EM +#undef EMe + +#define EM(a, b) [a] = #b, +#define EMe(a, b) [a] = #b + +/* drop_reasons is used to translate 'enum skb_drop_reason' to string, + * which is reported to user space. + */ +static const char * const drop_reasons[] = { + TRACE_SKB_DROP_REASON +}; + /* net_dm_mutex * * An overall lock guarding every operation coming from userspace. @@ -126,6 +139,7 @@ struct net_dm_skb_cb { struct devlink_trap_metadata *hw_metadata; void *pc; }; + enum skb_drop_reason reason; }; #define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0])) @@ -498,6 +512,7 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore, { ktime_t tstamp = ktime_get_real(); struct per_cpu_dm_data *data; + struct net_dm_skb_cb *cb; struct sk_buff *nskb; unsigned long flags; @@ -508,7 +523,11 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore, if (!nskb) return; - NET_DM_SKB_CB(nskb)->pc = location; + if ((unsigned int)reason >= SKB_DROP_REASON_MAX) + reason = SKB_DROP_REASON_NOT_SPECIFIED; + cb = NET_DM_SKB_CB(nskb); + cb->reason = reason; + cb->pc = location; /* Override the timestamp because we care about the time when the * packet was dropped. 
*/ @@ -553,7 +572,8 @@ static size_t net_dm_in_port_size(void) #define NET_DM_MAX_SYMBOL_LEN 40
-static size_t net_dm_packet_report_size(size_t payload_len)
+static size_t net_dm_packet_report_size(size_t payload_len,
+ enum skb_drop_reason reason)
{
 size_t size;
@@ -574,6 +594,8 @@ static size_t net_dm_packet_report_size(size_t payload_len)
 nla_total_size(sizeof(u32)) +
 /* NET_DM_ATTR_PROTO */
 nla_total_size(sizeof(u16)) +
+ /* NET_DM_ATTR_REASON */
+ nla_total_size(strlen(drop_reasons[reason]) + 1) +
 /* NET_DM_ATTR_PAYLOAD */
 nla_total_size(payload_len);
}
@@ -606,7 +628,7 @@ nla_put_failure: static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
 size_t payload_len)
{
- u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
+ struct net_dm_skb_cb *cb = NET_DM_SKB_CB(skb);
 char buf[NET_DM_MAX_SYMBOL_LEN];
 struct nlattr *attr;
 void *hdr;
@@ -620,10 +642,15 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
 if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
 goto nla_put_failure;
- if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
+ if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, (u64)(uintptr_t)cb->pc,
+ NET_DM_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_string(msg, NET_DM_ATTR_REASON,
+ drop_reasons[cb->reason]))
 goto nla_put_failure;
- snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
+ snprintf(buf, sizeof(buf), "%pS", cb->pc);
 if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
 goto nla_put_failure;
@@ -679,7 +706,9 @@ static void net_dm_packet_report(struct sk_buff *skb)
 if (net_dm_trunc_len)
 payload_len = min_t(size_t, net_dm_trunc_len, payload_len);
- msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
+ msg = nlmsg_new(net_dm_packet_report_size(payload_len,
+ NET_DM_SKB_CB(skb)->reason),
+ GFP_KERNEL);
 if (!msg)
 goto out;
-- cgit v1.2.3-71-gd317 From 7f5a08c79df35e68f1a43033450c5050f12bc155 Mon Sep 17 00:00:00 2001 From: Beau Belgrave Date: Tue, 18 Jan 2022 12:43:15 -0800 Subject: user_events: Add minimal support for trace_event into ftrace Minimal support for interacting with dynamic events, trace_event and ftrace. Core outline of flow between user process, ioctl and trace_event APIs. User mode processes that wish to use trace events to get data into ftrace, perf, eBPF, etc. are limited to uprobes today. The user events feature enables an ABI for user mode processes to create and write to trace events that are isolated from kernel level trace events. This enables a faster path for tracing from user mode data as well as opens managed code to participate in trace events, where stub locations are dynamic. User processes often want to trace only when it's useful. To enable this, a set of pages is mapped into the user process space that indicates the current state of the user events that have been registered. User processes can check if their event is hooked to a trace/probe, and if it is, emit the event data out via the write() syscall. Two new files are introduced into tracefs to accomplish this: user_events_status - This file is mmap'd into participating user mode processes to indicate event status. user_events_data - This file is opened and register/delete ioctls are issued to create/open/delete trace events that can be used for tracing. The typical scenario is to mmap user_events_status on process start. Processes then register the events they plan to use via the REG ioctl. The ioctl reads and updates the passed in user_reg struct.
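Restated as a compilable C sketch (illustrative only, not part of this patch: the tracefs mount path /sys/kernel/tracing, the example event "test u32 count" and its payload layout are assumptions, and most error checking is trimmed):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/user_events.h>

	int main(void)
	{
		struct user_reg reg;
		char *status_page;
		int page_fd, data_fd;

		/* Map the status page once at startup */
		page_fd = open("/sys/kernel/tracing/user_events_status", O_RDWR);
		status_page = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ,
				   MAP_SHARED, page_fd, 0);
		close(page_fd);

		/* Register (or open) the event; the kernel fills in the indexes */
		data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
		memset(&reg, 0, sizeof(reg));
		reg.size = sizeof(reg);
		reg.name_args = (uint64_t)(uintptr_t)"test u32 count";
		if (ioctl(data_fd, DIAG_IOCSREG, &reg) == -1) {
			perror("DIAG_IOCSREG");
			return 1;
		}

		/* Pay the write cost only while a probe is attached */
		if (status_page[reg.status_index]) {
			struct {
				uint32_t write_index;	/* must lead the payload */
				uint32_t count;
			} payload = { reg.write_index, 42 };

			write(data_fd, &payload, sizeof(payload));
		}
		return 0;
	}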
The status_index of the struct identifies which byte in the status page to check for that event. The write_index of the struct is used to describe that event when writing out to the fd that was used for the ioctl call. The data must always include this index first when writing out data for an event. Data can be written either by write() or by writev(). For example, in memory: int index; char data[]; Pseudo code example of typical usage: struct user_reg reg; int page_fd = open("user_events_status", O_RDWR); char *page_data = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, page_fd, 0); close(page_fd); int data_fd = open("user_events_data", O_RDWR); reg.size = sizeof(reg); reg.name_args = (__u64)"test"; ioctl(data_fd, DIAG_IOCSREG, &reg); int status_id = reg.status_index; int write_id = reg.write_index; struct iovec io[2]; io[0].iov_base = &write_id; io[0].iov_len = sizeof(write_id); io[1].iov_base = payload; io[1].iov_len = sizeof(payload); if (page_data[status_id]) writev(data_fd, io, 2); User events are also exposed via the dynamic_events tracefs file for both create and delete. Current status is exposed via the user_events_status tracefs file. Simple example to register a user event via dynamic_events: echo u:test >> dynamic_events cat dynamic_events u:test If an event is hooked to a probe, the hooked probe shows up: echo 1 > events/user_events/test/enable cat user_events_status 1:test # Used by ftrace Active: 1 Busy: 1 Max: 4096 If an event is not hooked to a probe, no probe status shows up: echo 0 > events/user_events/test/enable cat user_events_status 1:test Active: 1 Busy: 0 Max: 4096 Users can describe the trace event layout via the following format: name[:FLAG1[,FLAG2...]] [field1[;field2...]] Each field has the following format: type name Example for char array with a size of 20 named msg: echo 'u:detailed char[20] msg' >> dynamic_events cat dynamic_events u:detailed char[20] msg Data offsets are based on the data written out via write() and will be updated to reflect the correct offset in the trace_event fields. For dynamic data it is recommended to use the new __rel_loc data type. This type will be the same as __data_loc, but the offset is relative to this entry. This allows user_events to not worry about what common fields are being inserted before the data. The above format is valid for both the ioctl and the dynamic_events file. Link: https://lkml.kernel.org/r/20220118204326.2169-2-beaub@linux.microsoft.com Acked-by: Masami Hiramatsu Signed-off-by: Beau Belgrave Signed-off-by: Steven Rostedt (Google) --- include/uapi/linux/user_events.h | 116 ++++ kernel/trace/Kconfig | 14 + kernel/trace/Makefile | 1 + kernel/trace/trace_events_user.c | 1187 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 1318 insertions(+) create mode 100644 include/uapi/linux/user_events.h create mode 100644 kernel/trace/trace_events_user.c (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h new file mode 100644 index 000000000000..e570840571e1 --- /dev/null +++ b/include/uapi/linux/user_events.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, Microsoft Corporation.
+ *
+ * Authors:
+ * Beau Belgrave
+ */
+#ifndef _UAPI_LINUX_USER_EVENTS_H
+#define _UAPI_LINUX_USER_EVENTS_H
+
+#include
+#include
+
+#ifdef __KERNEL__
+#include
+#else
+#include
+#endif
+
+#define USER_EVENTS_SYSTEM "user_events"
+#define USER_EVENTS_PREFIX "u:"
+
+/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */
+#define EVENT_BIT_FTRACE 0
+#define EVENT_BIT_PERF 1
+#define EVENT_BIT_OTHER 7
+
+#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE)
+#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF)
+#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER)
+
+/* Create dynamic location entry within a 32-bit value */
+#define DYN_LOC(offset, size) ((size) << 16 | (offset))
+
+/* Use raw iterator for attached BPF program(s), no effect on ftrace/perf */
+#define FLAG_BPF_ITER (1 << 0)
+
+/*
+ * Describes an event registration and stores the results of the registration.
+ * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
+ * must set the size and name_args before invocation.
+ */
+struct user_reg {
+
+ /* Input: Size of the user_reg structure being used */
+ __u32 size;
+
+ /* Input: Pointer to string with event name, description and flags */
+ __u64 name_args;
+
+ /* Output: Byte index of the event within the status page */
+ __u32 status_index;
+
+ /* Output: Index of the event to use when writing data */
+ __u32 write_index;
+};
+
+#define DIAG_IOC_MAGIC '*'
+
+/* Requests to register a user_event */
+#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*)
+
+/* Requests to delete a user_event */
+#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*)
+
+/* Data type that was passed to the BPF program */
+enum {
+ /* Data resides in kernel space */
+ USER_BPF_DATA_KERNEL,
+
+ /* Data resides in user space */
+ USER_BPF_DATA_USER,
+
+ /* Data is a pointer to a user_bpf_iter structure */
+ USER_BPF_DATA_ITER,
+};
+
+/*
+ * Describes an iovec iterator that BPF programs can use to access data for
+ * a given user_event write() / writev() call.
+ */
+struct user_bpf_iter {
+
+ /* Offset of the data within the first iovec */
+ __u32 iov_offset;
+
+ /* Number of iovec structures */
+ __u32 nr_segs;
+
+ /* Pointer to iovec structures */
+ const struct iovec *iov;
+};
+
+/* Context that BPF programs receive when attached to a user_event */
+struct user_bpf_context {
+
+ /* Data type being passed (see union below) */
+ __u32 data_type;
+
+ /* Length of the data */
+ __u32 data_len;
+
+ /* Pointer to data, varies by data type */
+ union {
+ /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */
+ void *kdata;
+
+ /* User data (data_type == USER_BPF_DATA_USER) */
+ void *udata;
+
+ /* Direct iovec (data_type == USER_BPF_DATA_ITER) */
+ struct user_bpf_iter *iter;
+ };
+};
+
+#endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a5eb5e7fd624..16a52a71732d 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -737,6 +737,20 @@ config SYNTH_EVENTS If in doubt, say N.
+config USER_EVENTS
+ bool "User trace events"
+ select TRACING
+ select DYNAMIC_EVENTS
+ help
+ User trace events are user-defined trace events that
+ can be used like an existing kernel trace event. User trace
+ events are generated by writing to a tracefs file. User
+ processes can determine if their tracing events should be
+ generated by memory mapping a tracefs file and checking for
+ an associated byte being non-zero.
+
+ If in doubt, say N.
+ config HIST_TRIGGERS bool "Histogram triggers" depends on ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index bedc5caceec7..19ef3758da95 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -82,6 +82,7 @@ obj-$(CONFIG_PROBE_EVENTS) += trace_eprobe.o obj-$(CONFIG_TRACE_EVENT_INJECT) += trace_events_inject.o obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o +obj-$(CONFIG_USER_EVENTS) += trace_events_user.o obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o obj-$(CONFIG_TRACEPOINTS) += error_report-traces.o diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c new file mode 100644 index 000000000000..77105233115e --- /dev/null +++ b/kernel/trace/trace_events_user.c @@ -0,0 +1,1187 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, Microsoft Corporation. + * + * Authors: + * Beau Belgrave + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" +#include "trace_dynevent.h" + +#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1) + +#define FIELD_DEPTH_TYPE 0 +#define FIELD_DEPTH_NAME 1 +#define FIELD_DEPTH_SIZE 2 + +/* + * Limits how many trace_event calls user processes can create: + * Must be multiple of PAGE_SIZE. + */ +#define MAX_PAGES 1 +#define MAX_EVENTS (MAX_PAGES * PAGE_SIZE) + +/* Limit how long of an event name plus args within the subsystem. */ +#define MAX_EVENT_DESC 512 +#define EVENT_NAME(user_event) ((user_event)->tracepoint.name) +#define MAX_FIELD_ARRAY_SIZE 1024 + +static char *register_page_data; + +static DEFINE_MUTEX(reg_mutex); +static DEFINE_HASHTABLE(register_table, 4); +static DECLARE_BITMAP(page_bitmap, MAX_EVENTS); + +/* + * Stores per-event properties, as users register events + * within a file a user_event might be created if it does not + * already exist. These are globally used and their lifetime + * is tied to the refcnt member. These cannot go away until the + * refcnt reaches zero. + */ +struct user_event { + struct tracepoint tracepoint; + struct trace_event_call call; + struct trace_event_class class; + struct dyn_event devent; + struct hlist_node node; + struct list_head fields; + atomic_t refcnt; + int index; + int flags; +}; + +/* + * Stores per-file events references, as users register events + * within a file this structure is modified and freed via RCU. + * The lifetime of this struct is tied to the lifetime of the file. + * These are not shared and only accessible by the file that created it. 
+ */
+struct user_event_refs {
+ struct rcu_head rcu;
+ int count;
+ struct user_event *events[];
+};
+
+typedef void (*user_event_func_t) (struct user_event *user,
+ void *data, u32 datalen,
+ void *tpdata);
+
+static int user_event_parse(char *name, char *args, char *flags,
+ struct user_event **newuser);
+
+static u32 user_event_key(char *name)
+{
+ return jhash(name, strlen(name), 0);
+}
+
+static struct list_head *user_event_get_fields(struct trace_event_call *call)
+{
+ struct user_event *user = (struct user_event *)call->data;
+
+ return &user->fields;
+}
+
+/*
+ * Parses a register command for user_events
+ * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
+ *
+ * Example event named 'test' with a 20 char 'msg' field with an unsigned int
+ * 'id' field after:
+ * test char[20] msg;unsigned int id
+ *
+ * NOTE: Offsets are from the user data perspective, they are not from the
+ * trace_entry/buffer perspective. We automatically add the common properties
+ * sizes to the offset for the user.
+ */
+static int user_event_parse_cmd(char *raw_command, struct user_event **newuser)
+{
+ char *name = raw_command;
+ char *args = strpbrk(name, " ");
+ char *flags;
+
+ if (args)
+ *args++ = '\0';
+
+ flags = strpbrk(name, ":");
+
+ if (flags)
+ *flags++ = '\0';
+
+ return user_event_parse(name, args, flags, newuser);
+}
+
+static int user_field_array_size(const char *type)
+{
+ const char *start = strchr(type, '[');
+ char val[8];
+ char *bracket;
+ int size = 0;
+
+ if (start == NULL)
+ return -EINVAL;
+
+ if (strscpy(val, start + 1, sizeof(val)) <= 0)
+ return -EINVAL;
+
+ bracket = strchr(val, ']');
+
+ if (!bracket)
+ return -EINVAL;
+
+ *bracket = '\0';
+
+ if (kstrtouint(val, 0, &size))
+ return -EINVAL;
+
+ if (size > MAX_FIELD_ARRAY_SIZE)
+ return -EINVAL;
+
+ return size;
+}
+
+static int user_field_size(const char *type)
+{
+ /* long is not allowed from a user, since it's ambiguous in size */
+ if (strcmp(type, "s64") == 0)
+ return sizeof(s64);
+ if (strcmp(type, "u64") == 0)
+ return sizeof(u64);
+ if (strcmp(type, "s32") == 0)
+ return sizeof(s32);
+ if (strcmp(type, "u32") == 0)
+ return sizeof(u32);
+ if (strcmp(type, "int") == 0)
+ return sizeof(int);
+ if (strcmp(type, "unsigned int") == 0)
+ return sizeof(unsigned int);
+ if (strcmp(type, "s16") == 0)
+ return sizeof(s16);
+ if (strcmp(type, "u16") == 0)
+ return sizeof(u16);
+ if (strcmp(type, "short") == 0)
+ return sizeof(short);
+ if (strcmp(type, "unsigned short") == 0)
+ return sizeof(unsigned short);
+ if (strcmp(type, "s8") == 0)
+ return sizeof(s8);
+ if (strcmp(type, "u8") == 0)
+ return sizeof(u8);
+ if (strcmp(type, "char") == 0)
+ return sizeof(char);
+ if (strcmp(type, "unsigned char") == 0)
+ return sizeof(unsigned char);
+ if (str_has_prefix(type, "char["))
+ return user_field_array_size(type);
+ if (str_has_prefix(type, "unsigned char["))
+ return user_field_array_size(type);
+ if (str_has_prefix(type, "__data_loc "))
+ return sizeof(u32);
+ if (str_has_prefix(type, "__rel_loc "))
+ return sizeof(u32);
+
+ /* Unknown basic type, error */
+ return -EINVAL;
+}
+
+static void user_event_destroy_fields(struct user_event *user)
+{
+ struct ftrace_event_field *field, *next;
+ struct list_head *head = &user->fields;
+
+ list_for_each_entry_safe(field, next, head, link) {
+ list_del(&field->link);
+ kfree(field);
+ }
+}
+
+static int user_event_add_field(struct user_event *user, const char *type,
+ const char *name, int offset, int size,
+ int is_signed, int filter_type)
+{
+ struct ftrace_event_field
*field; + + field = kmalloc(sizeof(*field), GFP_KERNEL); + + if (!field) + return -ENOMEM; + + field->type = type; + field->name = name; + field->offset = offset; + field->size = size; + field->is_signed = is_signed; + field->filter_type = filter_type; + + list_add(&field->link, &user->fields); + + return 0; +} + +/* + * Parses the values of a field within the description + * Format: type name [size] + */ +static int user_event_parse_field(char *field, struct user_event *user, + u32 *offset) +{ + char *part, *type, *name; + u32 depth = 0, saved_offset = *offset; + int len, size = -EINVAL; + bool is_struct = false; + + field = skip_spaces(field); + + if (*field == '\0') + return 0; + + /* Handle types that have a space within */ + len = str_has_prefix(field, "unsigned "); + if (len) + goto skip_next; + + len = str_has_prefix(field, "struct "); + if (len) { + is_struct = true; + goto skip_next; + } + + len = str_has_prefix(field, "__data_loc unsigned "); + if (len) + goto skip_next; + + len = str_has_prefix(field, "__data_loc "); + if (len) + goto skip_next; + + len = str_has_prefix(field, "__rel_loc unsigned "); + if (len) + goto skip_next; + + len = str_has_prefix(field, "__rel_loc "); + if (len) + goto skip_next; + + goto parse; +skip_next: + type = field; + field = strpbrk(field + len, " "); + + if (field == NULL) + return -EINVAL; + + *field++ = '\0'; + depth++; +parse: + while ((part = strsep(&field, " ")) != NULL) { + switch (depth++) { + case FIELD_DEPTH_TYPE: + type = part; + break; + case FIELD_DEPTH_NAME: + name = part; + break; + case FIELD_DEPTH_SIZE: + if (!is_struct) + return -EINVAL; + + if (kstrtou32(part, 10, &size)) + return -EINVAL; + break; + default: + return -EINVAL; + } + } + + if (depth < FIELD_DEPTH_SIZE) + return -EINVAL; + + if (depth == FIELD_DEPTH_SIZE) + size = user_field_size(type); + + if (size == 0) + return -EINVAL; + + if (size < 0) + return size; + + *offset = saved_offset + size; + + return user_event_add_field(user, type, name, saved_offset, size, + type[0] != 'u', FILTER_OTHER); +} + +static void user_event_parse_flags(struct user_event *user, char *flags) +{ + char *flag; + + if (flags == NULL) + return; + + while ((flag = strsep(&flags, ",")) != NULL) { + if (strcmp(flag, "BPF_ITER") == 0) + user->flags |= FLAG_BPF_ITER; + } +} + +static int user_event_parse_fields(struct user_event *user, char *args) +{ + char *field; + u32 offset = sizeof(struct trace_entry); + int ret = -EINVAL; + + if (args == NULL) + return 0; + + while ((field = strsep(&args, ";")) != NULL) { + ret = user_event_parse_field(field, user, &offset); + + if (ret) + break; + } + + return ret; +} + +static struct trace_event_fields user_event_fields_array[1]; + +static enum print_line_t user_event_print_trace(struct trace_iterator *iter, + int flags, + struct trace_event *event) +{ + /* Unsafe to try to decode user provided print_fmt, use hex */ + trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16, + 1, iter->ent, iter->ent_size, true); + + return trace_handle_return(&iter->seq); +} + +static struct trace_event_functions user_event_funcs = { + .trace = user_event_print_trace, +}; + +static int destroy_user_event(struct user_event *user) +{ + int ret = 0; + + /* Must destroy fields before call removal */ + user_event_destroy_fields(user); + + ret = trace_remove_event_call(&user->call); + + if (ret) + return ret; + + dyn_event_remove(&user->devent); + + register_page_data[user->index] = 0; + clear_bit(user->index, page_bitmap); + hash_del(&user->node); + + 
kfree(EVENT_NAME(user)); + kfree(user); + + return ret; +} + +static struct user_event *find_user_event(char *name, u32 *outkey) +{ + struct user_event *user; + u32 key = user_event_key(name); + + *outkey = key; + + hash_for_each_possible(register_table, user, node, key) + if (!strcmp(EVENT_NAME(user), name)) + return user; + + return NULL; +} + +/* + * Writes the user supplied payload out to a trace file. + */ +static void user_event_ftrace(struct user_event *user, void *data, u32 datalen, + void *tpdata) +{ + struct trace_event_file *file; + struct trace_entry *entry; + struct trace_event_buffer event_buffer; + + file = (struct trace_event_file *)tpdata; + + if (!file || + !(file->flags & EVENT_FILE_FL_ENABLED) || + trace_trigger_soft_disabled(file)) + return; + + /* Allocates and fills trace_entry, + 1 of this is data payload */ + entry = trace_event_buffer_reserve(&event_buffer, file, + sizeof(*entry) + datalen); + + if (unlikely(!entry)) + return; + + memcpy(entry + 1, data, datalen); + + trace_event_buffer_commit(&event_buffer); +} + +/* + * Update the register page that is shared between user processes. + */ +static void update_reg_page_for(struct user_event *user) +{ + struct tracepoint *tp = &user->tracepoint; + char status = 0; + + if (atomic_read(&tp->key.enabled) > 0) { + struct tracepoint_func *probe_func_ptr; + user_event_func_t probe_func; + + rcu_read_lock_sched(); + + probe_func_ptr = rcu_dereference_sched(tp->funcs); + + if (probe_func_ptr) { + do { + probe_func = probe_func_ptr->func; + + if (probe_func == user_event_ftrace) + status |= EVENT_STATUS_FTRACE; + else + status |= EVENT_STATUS_OTHER; + } while ((++probe_func_ptr)->func); + } + + rcu_read_unlock_sched(); + } + + register_page_data[user->index] = status; +} + +/* + * Register callback for our events from tracing sub-systems. 
+ */
+static int user_event_reg(struct trace_event_call *call,
+ enum trace_reg type,
+ void *data)
+{
+ struct user_event *user = (struct user_event *)call->data;
+ int ret = 0;
+
+ if (!user)
+ return -ENOENT;
+
+ switch (type) {
+ case TRACE_REG_REGISTER:
+ ret = tracepoint_probe_register(call->tp,
+ call->class->probe,
+ data);
+ if (!ret)
+ goto inc;
+ break;
+
+ case TRACE_REG_UNREGISTER:
+ tracepoint_probe_unregister(call->tp,
+ call->class->probe,
+ data);
+ goto dec;
+
+ default:
+ break;
+ }
+
+ return ret;
+inc:
+ atomic_inc(&user->refcnt);
+ update_reg_page_for(user);
+ return 0;
+dec:
+ update_reg_page_for(user);
+ atomic_dec(&user->refcnt);
+ return 0;
+}
+
+static int user_event_create(const char *raw_command)
+{
+ struct user_event *user;
+ char *name;
+ int ret;
+
+ if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
+ return -ECANCELED;
+
+ raw_command += USER_EVENTS_PREFIX_LEN;
+ raw_command = skip_spaces(raw_command);
+
+ name = kstrdup(raw_command, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ mutex_lock(&reg_mutex);
+ ret = user_event_parse_cmd(name, &user);
+ mutex_unlock(&reg_mutex);
+
+ if (ret)
+ kfree(name);
+
+ return ret;
+}
+
+static int user_event_show(struct seq_file *m, struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+ struct ftrace_event_field *field, *next;
+ struct list_head *head;
+ int depth = 0;
+
+ seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
+
+ head = trace_get_fields(&user->call);
+
+ list_for_each_entry_safe_reverse(field, next, head, link) {
+ if (depth == 0)
+ seq_puts(m, " ");
+ else
+ seq_puts(m, "; ");
+
+ seq_printf(m, "%s %s", field->type, field->name);
+
+ if (str_has_prefix(field->type, "struct "))
+ seq_printf(m, " %d", field->size);
+
+ depth++;
+ }
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+static bool user_event_is_busy(struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+
+ return atomic_read(&user->refcnt) != 0;
+}
+
+static int user_event_free(struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+
+ if (atomic_read(&user->refcnt) != 0)
+ return -EBUSY;
+
+ return destroy_user_event(user);
+}
+
+static bool user_event_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev)
+{
+ struct user_event *user = container_of(ev, struct user_event, devent);
+
+ return strcmp(EVENT_NAME(user), event) == 0 &&
+ (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
+}
+
+static struct dyn_event_operations user_event_dops = {
+ .create = user_event_create,
+ .show = user_event_show,
+ .is_busy = user_event_is_busy,
+ .free = user_event_free,
+ .match = user_event_match,
+};
+
+static int user_event_trace_register(struct user_event *user)
+{
+ int ret;
+
+ ret = register_trace_event(&user->call.event);
+
+ if (!ret)
+ return -ENODEV;
+
+ ret = trace_add_event_call(&user->call);
+
+ if (ret)
+ unregister_trace_event(&user->call.event);
+
+ return ret;
+}
+
+/*
+ * Parses the event name, arguments and flags then registers if successful.
+ * The name buffer lifetime is owned by this method for success cases only.
+ */
+static int user_event_parse(char *name, char *args, char *flags,
+ struct user_event **newuser)
+{
+ int ret;
+ int index;
+ u32 key;
+ struct user_event *user = find_user_event(name, &key);
+
+ if (user) {
+ *newuser = user;
+ /*
+ * Name is allocated by caller, free it since it already exists.
+ * Caller only worries about failure cases for freeing. + */ + kfree(name); + return 0; + } + + index = find_first_zero_bit(page_bitmap, MAX_EVENTS); + + if (index == MAX_EVENTS) + return -EMFILE; + + user = kzalloc(sizeof(*user), GFP_KERNEL); + + if (!user) + return -ENOMEM; + + INIT_LIST_HEAD(&user->class.fields); + INIT_LIST_HEAD(&user->fields); + + user->tracepoint.name = name; + + user_event_parse_flags(user, flags); + + ret = user_event_parse_fields(user, args); + + if (ret) + goto put_user; + + /* Minimal print format */ + user->call.print_fmt = "\"\""; + + user->call.data = user; + user->call.class = &user->class; + user->call.name = name; + user->call.flags = TRACE_EVENT_FL_TRACEPOINT; + user->call.tp = &user->tracepoint; + user->call.event.funcs = &user_event_funcs; + + user->class.system = USER_EVENTS_SYSTEM; + user->class.fields_array = user_event_fields_array; + user->class.get_fields = user_event_get_fields; + user->class.reg = user_event_reg; + user->class.probe = user_event_ftrace; + + mutex_lock(&event_mutex); + ret = user_event_trace_register(user); + mutex_unlock(&event_mutex); + + if (ret) + goto put_user; + + user->index = index; + dyn_event_init(&user->devent, &user_event_dops); + dyn_event_add(&user->devent, &user->call); + set_bit(user->index, page_bitmap); + hash_add(register_table, &user->node, key); + + *newuser = user; + return 0; +put_user: + user_event_destroy_fields(user); + kfree(user); + return ret; +} + +/* + * Deletes a previously created event if it is no longer being used. + */ +static int delete_user_event(char *name) +{ + u32 key; + int ret; + struct user_event *user = find_user_event(name, &key); + + if (!user) + return -ENOENT; + + if (atomic_read(&user->refcnt) != 0) + return -EBUSY; + + mutex_lock(&event_mutex); + ret = destroy_user_event(user); + mutex_unlock(&event_mutex); + + return ret; +} + +/* + * Validates the user payload and writes via iterator. + */ +static ssize_t user_events_write_core(struct file *file, struct iov_iter *i) +{ + struct user_event_refs *refs; + struct user_event *user = NULL; + struct tracepoint *tp; + ssize_t ret = i->count; + int idx; + + if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx))) + return -EFAULT; + + rcu_read_lock_sched(); + + refs = rcu_dereference_sched(file->private_data); + + /* + * The refs->events array is protected by RCU, and new items may be + * added. But the user retrieved from indexing into the events array + * shall be immutable while the file is opened. + */ + if (likely(refs && idx < refs->count)) + user = refs->events[idx]; + + rcu_read_unlock_sched(); + + if (unlikely(user == NULL)) + return -ENOENT; + + tp = &user->tracepoint; + + /* + * It's possible key.enabled disables after this check, however + * we don't mind if a few events are included in this condition. 
+ */ + if (likely(atomic_read(&tp->key.enabled) > 0)) { + struct tracepoint_func *probe_func_ptr; + user_event_func_t probe_func; + void *tpdata; + void *kdata; + u32 datalen; + + kdata = kmalloc(i->count, GFP_KERNEL); + + if (unlikely(!kdata)) + return -ENOMEM; + + datalen = copy_from_iter(kdata, i->count, i); + + rcu_read_lock_sched(); + + probe_func_ptr = rcu_dereference_sched(tp->funcs); + + if (probe_func_ptr) { + do { + probe_func = probe_func_ptr->func; + tpdata = probe_func_ptr->data; + probe_func(user, kdata, datalen, tpdata); + } while ((++probe_func_ptr)->func); + } + + rcu_read_unlock_sched(); + + kfree(kdata); + } + + return ret; +} + +static ssize_t user_events_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct iovec iov; + struct iov_iter i; + + if (unlikely(*ppos != 0)) + return -EFAULT; + + if (unlikely(import_single_range(READ, (char *)ubuf, count, &iov, &i))) + return -EFAULT; + + return user_events_write_core(file, &i); +} + +static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i) +{ + return user_events_write_core(kp->ki_filp, i); +} + +static int user_events_ref_add(struct file *file, struct user_event *user) +{ + struct user_event_refs *refs, *new_refs; + int i, size, count = 0; + + refs = rcu_dereference_protected(file->private_data, + lockdep_is_held(®_mutex)); + + if (refs) { + count = refs->count; + + for (i = 0; i < count; ++i) + if (refs->events[i] == user) + return i; + } + + size = struct_size(refs, events, count + 1); + + new_refs = kzalloc(size, GFP_KERNEL); + + if (!new_refs) + return -ENOMEM; + + new_refs->count = count + 1; + + for (i = 0; i < count; ++i) + new_refs->events[i] = refs->events[i]; + + new_refs->events[i] = user; + + atomic_inc(&user->refcnt); + + rcu_assign_pointer(file->private_data, new_refs); + + if (refs) + kfree_rcu(refs, rcu); + + return i; +} + +static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg) +{ + u32 size; + long ret; + + ret = get_user(size, &ureg->size); + + if (ret) + return ret; + + if (size > PAGE_SIZE) + return -E2BIG; + + return copy_struct_from_user(kreg, sizeof(*kreg), ureg, size); +} + +/* + * Registers a user_event on behalf of a user process. + */ +static long user_events_ioctl_reg(struct file *file, unsigned long uarg) +{ + struct user_reg __user *ureg = (struct user_reg __user *)uarg; + struct user_reg reg; + struct user_event *user; + char *name; + long ret; + + ret = user_reg_get(ureg, ®); + + if (ret) + return ret; + + name = strndup_user((const char __user *)(uintptr_t)reg.name_args, + MAX_EVENT_DESC); + + if (IS_ERR(name)) { + ret = PTR_ERR(name); + return ret; + } + + ret = user_event_parse_cmd(name, &user); + + if (ret) { + kfree(name); + return ret; + } + + ret = user_events_ref_add(file, user); + + /* Positive number is index and valid */ + if (ret < 0) + return ret; + + put_user((u32)ret, &ureg->write_index); + put_user(user->index, &ureg->status_index); + + return 0; +} + +/* + * Deletes a user_event on behalf of a user process. + */ +static long user_events_ioctl_del(struct file *file, unsigned long uarg) +{ + void __user *ubuf = (void __user *)uarg; + char *name; + long ret; + + name = strndup_user(ubuf, MAX_EVENT_DESC); + + if (IS_ERR(name)) + return PTR_ERR(name); + + ret = delete_user_event(name); + + kfree(name); + + return ret; +} + +/* + * Handles the ioctl from user mode to register or alter operations. 
+ */ +static long user_events_ioctl(struct file *file, unsigned int cmd, + unsigned long uarg) +{ + long ret = -ENOTTY; + + switch (cmd) { + case DIAG_IOCSREG: + mutex_lock(®_mutex); + ret = user_events_ioctl_reg(file, uarg); + mutex_unlock(®_mutex); + break; + + case DIAG_IOCSDEL: + mutex_lock(®_mutex); + ret = user_events_ioctl_del(file, uarg); + mutex_unlock(®_mutex); + break; + } + + return ret; +} + +/* + * Handles the final close of the file from user mode. + */ +static int user_events_release(struct inode *node, struct file *file) +{ + struct user_event_refs *refs; + struct user_event *user; + int i; + + /* + * Ensure refs cannot change under any situation by taking the + * register mutex during the final freeing of the references. + */ + mutex_lock(®_mutex); + + refs = file->private_data; + + if (!refs) + goto out; + + /* + * The lifetime of refs has reached an end, it's tied to this file. + * The underlying user_events are ref counted, and cannot be freed. + * After this decrement, the user_events may be freed elsewhere. + */ + for (i = 0; i < refs->count; ++i) { + user = refs->events[i]; + + if (user) + atomic_dec(&user->refcnt); + } +out: + file->private_data = NULL; + + mutex_unlock(®_mutex); + + kfree(refs); + + return 0; +} + +static const struct file_operations user_data_fops = { + .write = user_events_write, + .write_iter = user_events_write_iter, + .unlocked_ioctl = user_events_ioctl, + .release = user_events_release, +}; + +/* + * Maps the shared page into the user process for checking if event is enabled. + */ +static int user_status_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long size = vma->vm_end - vma->vm_start; + + if (size != MAX_EVENTS) + return -EINVAL; + + return remap_pfn_range(vma, vma->vm_start, + virt_to_phys(register_page_data) >> PAGE_SHIFT, + size, vm_get_page_prot(VM_READ)); +} + +static void *user_seq_start(struct seq_file *m, loff_t *pos) +{ + if (*pos) + return NULL; + + return (void *)1; +} + +static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void user_seq_stop(struct seq_file *m, void *p) +{ +} + +static int user_seq_show(struct seq_file *m, void *p) +{ + struct user_event *user; + char status; + int i, active = 0, busy = 0, flags; + + mutex_lock(®_mutex); + + hash_for_each(register_table, i, user, node) { + status = register_page_data[user->index]; + flags = user->flags; + + seq_printf(m, "%d:%s", user->index, EVENT_NAME(user)); + + if (flags != 0 || status != 0) + seq_puts(m, " #"); + + if (status != 0) { + seq_puts(m, " Used by"); + if (status & EVENT_STATUS_FTRACE) + seq_puts(m, " ftrace"); + if (status & EVENT_STATUS_PERF) + seq_puts(m, " perf"); + if (status & EVENT_STATUS_OTHER) + seq_puts(m, " other"); + busy++; + } + + if (flags & FLAG_BPF_ITER) + seq_puts(m, " FLAG:BPF_ITER"); + + seq_puts(m, "\n"); + active++; + } + + mutex_unlock(®_mutex); + + seq_puts(m, "\n"); + seq_printf(m, "Active: %d\n", active); + seq_printf(m, "Busy: %d\n", busy); + seq_printf(m, "Max: %ld\n", MAX_EVENTS); + + return 0; +} + +static const struct seq_operations user_seq_ops = { + .start = user_seq_start, + .next = user_seq_next, + .stop = user_seq_stop, + .show = user_seq_show, +}; + +static int user_status_open(struct inode *node, struct file *file) +{ + return seq_open(file, &user_seq_ops); +} + +static const struct file_operations user_status_fops = { + .open = user_status_open, + .mmap = user_status_mmap, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + 
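The two file_operations above define the whole user-mode surface: user_events_data accepts registrations (ioctl) and event payloads (write/writev), while user_events_status exposes the shared enable page. A rough usage sketch follows; it is not part of the patch, the header name, DIAG_IOCSREG and the struct user_reg fields are assumptions based on this series' UAPI header, and the status page is assumed to be a single 4 KiB page:

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/uio.h>
	#include <linux/user_events.h>	/* assumed: struct user_reg, DIAG_IOCSREG */

	int main(void)
	{
		int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
		int status_fd = open("/sys/kernel/tracing/user_events_status", O_RDWR);
		/* mmap length must equal the kernel's MAX_EVENTS; one page assumed */
		char *status = mmap(NULL, 4096, PROT_READ, MAP_SHARED, status_fd, 0);
		struct user_reg reg = {
			.size = sizeof(reg),
			.name_args = (__u64)(uintptr_t)"test u32 count",
		};
		__u32 count = 1;
		struct iovec io[2] = {
			{ &reg.write_index, sizeof(reg.write_index) },
			{ &count, sizeof(count) },
		};

		if (ioctl(data_fd, DIAG_IOCSREG, &reg) < 0)
			return 1;

		/* the kernel filled in reg.status_index and reg.write_index */
		if (status[reg.status_index])	/* only pay the cost while enabled */
			writev(data_fd, io, 2);

		return 0;
	}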
+/* + * Creates a set of tracefs files to allow user mode interactions. + */ +static int create_user_tracefs(void) +{ + struct dentry *edata, *emmap; + + edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE, + NULL, NULL, &user_data_fops); + + if (!edata) { + pr_warn("Could not create tracefs 'user_events_data' entry\n"); + goto err; + } + + /* mmap with MAP_SHARED requires writable fd */ + emmap = tracefs_create_file("user_events_status", TRACE_MODE_WRITE, + NULL, NULL, &user_status_fops); + + if (!emmap) { + tracefs_remove(edata); + pr_warn("Could not create tracefs 'user_events_status' entry\n"); + goto err; + } + + return 0; +err: + return -ENODEV; +} + +static void set_page_reservations(bool set) +{ + int page; + + for (page = 0; page < MAX_PAGES; ++page) { + void *addr = register_page_data + (PAGE_SIZE * page); + + if (set) + SetPageReserved(virt_to_page(addr)); + else + ClearPageReserved(virt_to_page(addr)); + } +} + +static int __init trace_events_user_init(void) +{ + int ret; + + /* Zero all bits beside 0 (which is reserved for failures) */ + bitmap_zero(page_bitmap, MAX_EVENTS); + set_bit(0, page_bitmap); + + register_page_data = kzalloc(MAX_EVENTS, GFP_KERNEL); + + if (!register_page_data) + return -ENOMEM; + + set_page_reservations(true); + + ret = create_user_tracefs(); + + if (ret) { + pr_warn("user_events could not register with tracefs\n"); + set_page_reservations(false); + kfree(register_page_data); + return ret; + } + + if (dyn_event_register(&user_event_dops)) + pr_warn("user_events could not register with dyn_events\n"); + + return 0; +} + +fs_initcall(trace_events_user_init); -- cgit v1.2.3-71-gd317 From a6a6fe27bab48f0d09a64b051e7bde432fcae081 Mon Sep 17 00:00:00 2001 From: "D. Wythe" Date: Thu, 10 Feb 2022 17:11:37 +0800 Subject: net/smc: Dynamic control handshake limitation by socket options This patch adds dynamic, per-socket control of the SMC handshake limitation. In production environments it is possible for the same application to handle different service types, and these may call for different SMC handshake limitation settings. This patch uses socket options to accomplish this; since we do not have a socket option level for SMC yet, that level is implemented at the same time. This patch does the following: - add new socket option level: SOL_SMC. - add new SMC socket option: SMC_LIMIT_HS. - provide getter/setter for SMC socket options. Link: https://lore.kernel.org/all/20f504f961e1a803f85d64229ad84260434203bd.1644323503.git.alibuda@linux.alibaba.com/ Signed-off-by: D. Wythe Signed-off-by: David S.
Miller --- include/linux/socket.h | 1 + include/uapi/linux/smc.h | 4 +++ net/smc/af_smc.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++- net/smc/smc.h | 1 + 4 files changed, 74 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/linux/socket.h b/include/linux/socket.h index 8ef26d89ef49..6f85f5d957ef 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -366,6 +366,7 @@ struct ucred { #define SOL_XDP 283 #define SOL_MPTCP 284 #define SOL_MCTP 285 +#define SOL_SMC 286 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 6c2874fd2c00..343e7450c3a3 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -284,4 +284,8 @@ enum { __SMC_NLA_SEID_TABLE_MAX, SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1 }; + +/* SMC socket options */ +#define SMC_LIMIT_HS 1 /* constraint on smc handshake */ + #endif /* _UAPI_LINUX_SMC_H */ diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index a05ffb268c3e..97dcdc0a2107 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -2326,7 +2326,8 @@ static int smc_listen(struct socket *sock, int backlog) inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; + if (smc->limit_smc_hs) + tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; rc = kernel_listen(smc->clcsock, backlog); if (rc) { @@ -2621,6 +2622,67 @@ out: return rc ? rc : rc1; } +static int __smc_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct smc_sock *smc; + int val, len; + + smc = smc_sk(sock->sk); + + if (get_user(len, optlen)) + return -EFAULT; + + len = min_t(int, len, sizeof(int)); + + if (len < 0) + return -EINVAL; + + switch (optname) { + case SMC_LIMIT_HS: + val = smc->limit_smc_hs; + break; + default: + return -EOPNOTSUPP; + } + + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + +static int __smc_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct smc_sock *smc; + int val, rc; + + smc = smc_sk(sk); + + lock_sock(sk); + switch (optname) { + case SMC_LIMIT_HS: + if (optlen < sizeof(int)) { + rc = -EINVAL; + break; + } + if (copy_from_sockptr(&val, optval, sizeof(int))) { + rc = -EFAULT; + break; + } + + smc->limit_smc_hs = !!val; + rc = 0; + break; + default: + rc = -EOPNOTSUPP; + break; + } + release_sock(sk); + + return rc; +} + static int smc_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { @@ -2630,6 +2692,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (level == SOL_TCP && optname == TCP_ULP) return -EOPNOTSUPP; + else if (level == SOL_SMC) + return __smc_setsockopt(sock, level, optname, optval, optlen); smc = smc_sk(sk); @@ -2712,6 +2776,9 @@ static int smc_getsockopt(struct socket *sock, int level, int optname, struct smc_sock *smc; int rc; + if (level == SOL_SMC) + return __smc_getsockopt(sock, level, optname, optval, optlen); + smc = smc_sk(sock->sk); mutex_lock(&smc->clcsock_release_lock); if (!smc->clcsock) { diff --git a/net/smc/smc.h b/net/smc/smc.h index e91e40040d07..7e2693832a1b 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -249,6 +249,7 @@ struct smc_sock { /* smc sock container */ struct work_struct smc_listen_work;/* prepare new accept socket */ struct list_head accept_q; /* sockets to be accepted */
spinlock_t accept_q_lock; /* protects accept_q */ + bool limit_smc_hs; /* put constraint on handshake */ bool use_fallback; /* fallback to tcp */ int fallback_rsn; /* reason for fallback */ u32 peer_diagnosis; /* decline reason from peer */ -- cgit v1.2.3-71-gd317 From f9496b7c1b48ce02cd17a3ee88b1e049c689a222 Mon Sep 17 00:00:00 2001 From: "D. Wythe" Date: Thu, 10 Feb 2022 17:11:38 +0800 Subject: net/smc: Add global configure for handshake limitation by netlink Although we can control the SMC handshake limitation through socket options, that means applications which need it must modify their code, which is quite troublesome for many existing applications. This patch modifies the global default value of the SMC handshake limitation through netlink, providing a way to put a constraint on the handshake without modifying any application code. Suggested-by: Tony Lu Signed-off-by: D. Wythe Reviewed-by: Tony Lu Signed-off-by: David S. Miller --- include/net/netns/smc.h | 2 ++ include/uapi/linux/smc.h | 11 +++++++++++ net/smc/af_smc.c | 42 ++++++++++++++++++++++++++++++++++++++++++ net/smc/smc.h | 6 ++++++ net/smc/smc_netlink.c | 15 +++++++++++++++ net/smc/smc_pnet.c | 3 +++ 6 files changed, 79 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h index ea8a9cf2619b..47b166684fd8 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -12,5 +12,7 @@ struct netns_smc { /* protect fback_rsn */ struct mutex mutex_fback_rsn; struct smc_stats_rsn *fback_rsn; + + bool limit_smc_hs; /* constraint on handshake */ }; #endif diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 343e7450c3a3..693f549f6966 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -59,6 +59,9 @@ enum { SMC_NETLINK_DUMP_SEID, SMC_NETLINK_ENABLE_SEID, SMC_NETLINK_DISABLE_SEID, + SMC_NETLINK_DUMP_HS_LIMITATION, + SMC_NETLINK_ENABLE_HS_LIMITATION, + SMC_NETLINK_DISABLE_HS_LIMITATION, }; /* SMC_GENL_FAMILY top level attributes */ @@ -285,6 +288,14 @@ enum { SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1 }; +/* SMC_NETLINK_HS_LIMITATION attributes */ +enum { + SMC_NLA_HS_LIMITATION_UNSPEC, + SMC_NLA_HS_LIMITATION_ENABLED, /* u8 */ + __SMC_NLA_HS_LIMITATION_MAX, + SMC_NLA_HS_LIMITATION_MAX = __SMC_NLA_HS_LIMITATION_MAX - 1 +}; + /* SMC socket options */ #define SMC_LIMIT_HS 1 /* constraint on smc handshake */ diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 97dcdc0a2107..246c874de629 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -66,6 +66,45 @@ struct workqueue_struct *smc_close_wq; /* wq for close work */ static void smc_tcp_listen_work(struct work_struct *); static void smc_connect_work(struct work_struct *); +int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); + void *hdr; + + if (cb_ctx->pos[0]) + goto out; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &smc_gen_nl_family, NLM_F_MULTI, + SMC_NETLINK_DUMP_HS_LIMITATION); + if (!hdr) + return -ENOMEM; + + if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED, + sock_net(skb->sk)->smc.limit_smc_hs)) + goto err; + + genlmsg_end(skb, hdr); + cb_ctx->pos[0] = 1; +out: + return skb->len; +err: + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; +} + +int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info) +{ + sock_net(skb->sk)->smc.limit_smc_hs = true; + return 0; +} + +int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
+{ + sock_net(skb->sk)->smc.limit_smc_hs = false; + return 0; +} + static void smc_set_keepalive(struct sock *sk, int val) { struct smc_sock *smc = smc_sk(sk); @@ -3007,6 +3046,9 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol, smc->use_fallback = false; /* assume rdma capability first */ smc->fallback_rsn = 0; + /* default behavior from limit_smc_hs in every net namespace */ + smc->limit_smc_hs = net->smc.limit_smc_hs; + rc = 0; if (!clcsock) { rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, diff --git a/net/smc/smc.h b/net/smc/smc.h index 7e2693832a1b..a096d8af21a0 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -14,6 +14,7 @@ #include #include #include /* __aligned */ +#include #include #include "smc_ib.h" @@ -336,4 +337,9 @@ void smc_fill_gid_list(struct smc_link_group *lgr, struct smc_gidlist *gidlist, struct smc_ib_device *known_dev, u8 *known_gid); +/* smc handshake limitation interface for netlink */ +int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb); +int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info); +int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info); + #endif /* __SMC_H */ diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c index f13ab0661ed5..c5a62f6f52ba 100644 --- a/net/smc/smc_netlink.c +++ b/net/smc/smc_netlink.c @@ -111,6 +111,21 @@ static const struct genl_ops smc_gen_nl_ops[] = { .flags = GENL_ADMIN_PERM, .doit = smc_nl_disable_seid, }, + { + .cmd = SMC_NETLINK_DUMP_HS_LIMITATION, + /* can be retrieved by unprivileged users */ + .dumpit = smc_nl_dump_hs_limitation, + }, + { + .cmd = SMC_NETLINK_ENABLE_HS_LIMITATION, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_enable_hs_limitation, + }, + { + .cmd = SMC_NETLINK_DISABLE_HS_LIMITATION, + .flags = GENL_ADMIN_PERM, + .doit = smc_nl_disable_hs_limitation, + }, }; static const struct nla_policy smc_gen_nl_policy[2] = { diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 0599246c0376..ff61b7b95875 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -870,6 +870,9 @@ int smc_pnet_net_init(struct net *net) smc_pnet_create_pnetids_list(net); + /* disable handshake limitation by default */ + net->smc.limit_smc_hs = 0; + return 0; } -- cgit v1.2.3-71-gd317 From e9e9feebcbc14b174fef862842f8cc9a388e1db3 Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 11 Feb 2022 19:22:10 +0100 Subject: KVM: s390: Add optional storage key checking to MEMOP IOCTL User space needs a mechanism to perform key checked accesses when emulating instructions. The key can be passed as an additional argument. Having an additional argument is flexible, as user space can pass the guest PSW's key, in order to make an access the same way the CPU would, or pass another key if necessary. 
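For illustration, an emulator doing a key-checked read of guest logical memory might use the new flag like this (a sketch against the uapi below; vcpu_fd, guest_addr, buf and psw_key are assumed to exist):

	/* needs <linux/kvm.h>, <sys/ioctl.h>, <stdint.h> */
	struct kvm_s390_mem_op op = {
		.gaddr = guest_addr,			/* guest logical address */
		.size  = sizeof(buf),
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(uintptr_t)buf,
		.ar    = 0,				/* access register number */
		.key   = psw_key,			/* 0..15, e.g. the guest PSW key */
		.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,
	};
	int rc = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
	/* rc == 0: buf holds the key-checked data; rc > 0: program
	 * interruption code, e.g. PGM_PROTECTION on a key mismatch */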
Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Claudio Imbrenda Reviewed-by: Christian Borntraeger Reviewed-by: Janosch Frank Link: https://lore.kernel.org/r/20220211182215.2730017-6-scgl@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 31 +++++++++++++++++++++---------- include/uapi/linux/kvm.h | 6 +++++- 2 files changed, 26 insertions(+), 11 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index fdbd6c1dc709..c31b40abfa23 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2359,6 +2359,11 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) return r; } +static bool access_key_invalid(u8 access_key) +{ + return access_key > 0xf; +} + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -4692,17 +4697,21 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, void *tmpbuf = NULL; int r = 0; const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION - | KVM_S390_MEMOP_F_CHECK_ONLY; + | KVM_S390_MEMOP_F_CHECK_ONLY + | KVM_S390_MEMOP_F_SKEY_PROTECTION; if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) return -EINVAL; - if (mop->size > MEM_OP_MAX_SIZE) return -E2BIG; - if (kvm_s390_pv_cpu_is_protected(vcpu)) return -EINVAL; - + if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { + if (access_key_invalid(mop->key)) + return -EINVAL; + } else { + mop->key = 0; + } if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { tmpbuf = vmalloc(mop->size); if (!tmpbuf) @@ -4712,11 +4721,12 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, switch (mop->op) { case KVM_S390_MEMOP_LOGICAL_READ: if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { - r = check_gva_range(vcpu, mop->gaddr, mop->ar, - mop->size, GACC_FETCH, 0); + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, + GACC_FETCH, mop->key); break; } - r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); + r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, + mop->size, mop->key); if (r == 0) { if (copy_to_user(uaddr, tmpbuf, mop->size)) r = -EFAULT; @@ -4724,15 +4734,16 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, break; case KVM_S390_MEMOP_LOGICAL_WRITE: if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { - r = check_gva_range(vcpu, mop->gaddr, mop->ar, - mop->size, GACC_STORE, 0); + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, + GACC_STORE, mop->key); break; } if (copy_from_user(tmpbuf, uaddr, mop->size)) { r = -EFAULT; break; } - r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); + r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, + mop->size, mop->key); break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5191b57e1562..4566f429db2c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -562,7 +562,10 @@ struct kvm_s390_mem_op { __u32 op; /* type of operation */ __u64 buf; /* buffer in userspace */ union { - __u8 ar; /* the access register number */ + struct { + __u8 ar; /* the access register number */ + __u8 key; /* access key, ignored if flag unset */ + }; __u32 sida_offset; /* offset into the sida */ __u8 reserved[32]; /* should be set to 0 */ }; @@ -575,6 +578,7 @@ struct kvm_s390_mem_op { /* flags for kvm_s390_mem_op->flags */ #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) +#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) /* for KVM_INTERRUPT */ struct kvm_interrupt { 
-- cgit v1.2.3-71-gd317 From ef11c9463ae006302ce170a401854a48ea0532ca Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 11 Feb 2022 19:22:11 +0100 Subject: KVM: s390: Add vm IOCTL for key checked guest absolute memory access Channel I/O honors storage keys and is performed on absolute memory. For I/O emulation user space therefore needs to be able to do key checked accesses. The vm IOCTL supports read/write accesses, as well as checking if an access would succeed. Unlike relying on KVM_S390_GET_SKEYS for key checking would, the vm IOCTL performs the check in lockstep with the read or write, by, ultimately, mapping the access to move instructions that support key protection checking with a supplied key. Fetch and storage protection override are not applicable to absolute accesses and so are not applied as they are when using the vcpu memop. Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20220211182215.2730017-7-scgl@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kvm/gaccess.c | 72 ++++++++++++++++++++++++++++++++++++++++++ arch/s390/kvm/gaccess.h | 6 ++++ arch/s390/kvm/kvm-s390.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 ++ 4 files changed, 161 insertions(+) (limited to 'include/uapi/linux') diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 37838f637707..d53a183c2005 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -795,6 +795,35 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu, return 1; } +static int vm_check_access_key(struct kvm *kvm, u8 access_key, + enum gacc_mode mode, gpa_t gpa) +{ + u8 storage_key, access_control; + bool fetch_protected; + unsigned long hva; + int r; + + if (access_key == 0) + return 0; + + hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + if (kvm_is_error_hva(hva)) + return PGM_ADDRESSING; + + mmap_read_lock(current->mm); + r = get_guest_storage_key(current->mm, hva, &storage_key); + mmap_read_unlock(current->mm); + if (r) + return r; + access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key); + if (access_control == access_key) + return 0; + fetch_protected = storage_key & _PAGE_FP_BIT; + if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected) + return 0; + return PGM_PROTECTION; +} + static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode, union asce asce) { @@ -994,6 +1023,26 @@ access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa, return 0; } +int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data, + unsigned long len, enum gacc_mode mode, u8 access_key) +{ + int offset = offset_in_page(gpa); + int fragment_len; + int rc; + + while (min(PAGE_SIZE - offset, len) > 0) { + fragment_len = min(PAGE_SIZE - offset, len); + rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key); + if (rc) + return rc; + offset = 0; + len -= fragment_len; + data += fragment_len; + gpa += fragment_len; + } + return 0; +} + int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, unsigned long len, enum gacc_mode mode, u8 access_key) @@ -1144,6 +1193,29 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, return rc; } +/** + * check_gpa_range - test a range of guest physical addresses for accessibility + * @kvm: virtual machine instance + * @gpa: guest physical address + * @length: length of test range + * @mode: access mode to test, relevant for storage 
keys + * @access_key: access key to match the storage keys with + */ +int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length, + enum gacc_mode mode, u8 access_key) +{ + unsigned int fragment_len; + int rc = 0; + + while (length && !rc) { + fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length); + rc = vm_check_access_key(kvm, access_key, mode, gpa); + length -= fragment_len; + gpa += fragment_len; + } + return rc; +} + /** * kvm_s390_check_low_addr_prot_real - check for low-address protection * @vcpu: virtual cpu diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index c5f2e7311b17..1124ff282012 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -193,6 +193,12 @@ int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, unsigned long length, enum gacc_mode mode, u8 access_key); +int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length, + enum gacc_mode mode, u8 access_key); + +int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data, + unsigned long len, enum gacc_mode mode, u8 access_key); + int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, unsigned long len, enum gacc_mode mode, u8 access_key); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index c31b40abfa23..36bc73b5f5de 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2364,6 +2364,78 @@ static bool access_key_invalid(u8 access_key) return access_key > 0xf; } +static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) +{ + void __user *uaddr = (void __user *)mop->buf; + u64 supported_flags; + void *tmpbuf = NULL; + int r, srcu_idx; + + supported_flags = KVM_S390_MEMOP_F_SKEY_PROTECTION + | KVM_S390_MEMOP_F_CHECK_ONLY; + if (mop->flags & ~supported_flags) + return -EINVAL; + if (mop->size > MEM_OP_MAX_SIZE) + return -E2BIG; + if (kvm_s390_pv_is_protected(kvm)) + return -EINVAL; + if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { + if (access_key_invalid(mop->key)) + return -EINVAL; + } else { + mop->key = 0; + } + if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { + tmpbuf = vmalloc(mop->size); + if (!tmpbuf) + return -ENOMEM; + } + + srcu_idx = srcu_read_lock(&kvm->srcu); + + if (kvm_is_error_gpa(kvm, mop->gaddr)) { + r = PGM_ADDRESSING; + goto out_unlock; + } + + switch (mop->op) { + case KVM_S390_MEMOP_ABSOLUTE_READ: { + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { + r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_FETCH, mop->key); + } else { + r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, + mop->size, GACC_FETCH, mop->key); + if (r == 0) { + if (copy_to_user(uaddr, tmpbuf, mop->size)) + r = -EFAULT; + } + } + break; + } + case KVM_S390_MEMOP_ABSOLUTE_WRITE: { + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { + r = check_gpa_range(kvm, mop->gaddr, mop->size, GACC_STORE, mop->key); + } else { + if (copy_from_user(tmpbuf, uaddr, mop->size)) { + r = -EFAULT; + break; + } + r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, + mop->size, GACC_STORE, mop->key); + } + break; + } + default: + r = -EINVAL; + } + +out_unlock: + srcu_read_unlock(&kvm->srcu, srcu_idx); + + vfree(tmpbuf); + return r; +} + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2488,6 +2560,15 @@ long kvm_arch_vm_ioctl(struct file *filp, } break; } + case KVM_S390_MEM_OP: { + struct kvm_s390_mem_op mem_op; + + if (copy_from_user(&mem_op,
argp, sizeof(mem_op)) == 0) + r = kvm_s390_vm_mem_op(kvm, &mem_op); + else + r = -EFAULT; + break; + } default: r = -ENOTTY; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4566f429db2c..4bc7623def87 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -575,6 +575,8 @@ struct kvm_s390_mem_op { #define KVM_S390_MEMOP_LOGICAL_WRITE 1 #define KVM_S390_MEMOP_SIDA_READ 2 #define KVM_S390_MEMOP_SIDA_WRITE 3 +#define KVM_S390_MEMOP_ABSOLUTE_READ 4 +#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 /* flags for kvm_s390_mem_op->flags */ #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) -- cgit v1.2.3-71-gd317 From d004079edc166ff19605475211305923c708b4d5 Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 11 Feb 2022 19:22:13 +0100 Subject: KVM: s390: Add capability for storage key extension of MEM_OP IOCTL Availability of the KVM_CAP_S390_MEM_OP_EXTENSION capability signals that: * The vcpu MEM_OP IOCTL supports storage key checking. * The vm MEM_OP IOCTL exists. Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Janosch Frank Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20220211182215.2730017-9-scgl@linux.ibm.com Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 1 + include/uapi/linux/kvm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/uapi/linux') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 773bccdd446c..c2c26c2aad64 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -564,6 +564,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_VCPU_RESETS: case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_S390_DIAG318: + case KVM_CAP_S390_MEM_OP_EXTENSION: r = 1; break; case KVM_CAP_SET_GUEST_DEBUG2: diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4bc7623def87..08756eeea065 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1140,6 +1140,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_GPA_BITS 207 #define KVM_CAP_XSAVE2 208 #define KVM_CAP_SYS_ATTRIBUTES 209 +#define KVM_CAP_S390_MEM_OP_EXTENSION 210 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-71-gd317 From 5e35d0eb472b48ac9c8ef7017753b8a1f765aa01 Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 11 Feb 2022 19:22:14 +0100 Subject: KVM: s390: Update api documentation for memop ioctl Document all currently existing operations, flags and explain under which circumstances they are available. Document the recently introduced absolute operations and the storage key protection flag, as well as the existing SIDA operations. 
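A minimal user-space sketch of the interface being documented, assuming an existing vm_fd, a buffer data and an access_key (the variable names are illustrative; the constants are from the uapi above):

	/* probe the extension, then do a key-checked absolute write */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP_EXTENSION) > 0) {
		struct kvm_s390_mem_op op = {
			.gaddr = absolute_addr,
			.size  = sizeof(data),
			.op    = KVM_S390_MEMOP_ABSOLUTE_WRITE,
			.buf   = (__u64)(uintptr_t)data,
			.key   = access_key,		/* 0..15 */
			.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,
		};
		int rc = ioctl(vm_fd, KVM_S390_MEM_OP, &op);
		/* rc > 0 is the program interruption code the access raised */
	}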
Signed-off-by: Janis Schoetterl-Glausch Reviewed-by: Janosch Frank Link: https://lore.kernel.org/r/20220211182215.2730017-10-scgl@linux.ibm.com Signed-off-by: Christian Borntraeger --- Documentation/virt/kvm/api.rst | 112 +++++++++++++++++++++++++++++++++-------- include/uapi/linux/kvm.h | 2 +- 2 files changed, 91 insertions(+), 23 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a4267104db50..48f23bb80d7f 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -3683,15 +3683,17 @@ The fields in each entry are defined as follows: 4.89 KVM_S390_MEM_OP -------------------- -:Capability: KVM_CAP_S390_MEM_OP +:Capability: KVM_CAP_S390_MEM_OP, KVM_CAP_S390_PROTECTED, KVM_CAP_S390_MEM_OP_EXTENSION :Architectures: s390 -:Type: vcpu ioctl +:Type: vm ioctl, vcpu ioctl :Parameters: struct kvm_s390_mem_op (in) :Returns: = 0 on success, < 0 on generic error (e.g. -EFAULT or -ENOMEM), > 0 if an exception occurred while walking the page tables -Read or write data from/to the logical (virtual) memory of a VCPU. +Read or write data from/to the VM's memory. +The KVM_CAP_S390_MEM_OP_EXTENSION capability specifies what functionality is +supported. Parameters are specified via the following structure:: @@ -3701,33 +3703,99 @@ Parameters are specified via the following structure:: __u32 size; /* amount of bytes */ __u32 op; /* type of operation */ __u64 buf; /* buffer in userspace */ - __u8 ar; /* the access register number */ - __u8 reserved[31]; /* should be set to 0 */ + union { + struct { + __u8 ar; /* the access register number */ + __u8 key; /* access key, ignored if flag unset */ + }; + __u32 sida_offset; /* offset into the sida */ + __u8 reserved[32]; /* ignored */ + }; }; -The type of operation is specified in the "op" field. It is either -KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or -KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The -KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check -whether the corresponding memory access would create an access exception -(without touching the data in the memory at the destination). In case an -access exception occurred while walking the MMU tables of the guest, the -ioctl returns a positive error number to indicate the type of exception. -This exception is also raised directly at the corresponding VCPU if the -flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field. - The start address of the memory region has to be specified in the "gaddr" field, and the length of the region in the "size" field (which must not be 0). The maximum value for "size" can be obtained by checking the KVM_CAP_S390_MEM_OP capability. "buf" is the buffer supplied by the userspace application where the read data should be written to for -KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written is -stored for a KVM_S390_MEMOP_LOGICAL_WRITE. When KVM_S390_MEMOP_F_CHECK_ONLY -is specified, "buf" is unused and can be NULL. "ar" designates the access -register number to be used; the valid range is 0..15. +a read access, or where the data that should be written is stored for +a write access. The "reserved" field is meant for future extensions. +Reserved and unused values are ignored. Future extension that add members must +introduce new flags. + +The type of operation is specified in the "op" field. Flags modifying +their behavior can be set in the "flags" field. 
Undefined flag bits must +be set to 0. + +Possible operations are: + * ``KVM_S390_MEMOP_LOGICAL_READ`` + * ``KVM_S390_MEMOP_LOGICAL_WRITE`` + * ``KVM_S390_MEMOP_ABSOLUTE_READ`` + * ``KVM_S390_MEMOP_ABSOLUTE_WRITE`` + * ``KVM_S390_MEMOP_SIDA_READ`` + * ``KVM_S390_MEMOP_SIDA_WRITE`` + +Logical read/write: +^^^^^^^^^^^^^^^^^^^ + +Access logical memory, i.e. translate the given guest address to an absolute +address given the state of the VCPU and use the absolute address as target of +the access. "ar" designates the access register number to be used; the valid +range is 0..15. +Logical accesses are permitted for the VCPU ioctl only. +Logical accesses are permitted for non-protected guests only. + +Supported flags: + * ``KVM_S390_MEMOP_F_CHECK_ONLY`` + * ``KVM_S390_MEMOP_F_INJECT_EXCEPTION`` + * ``KVM_S390_MEMOP_F_SKEY_PROTECTION`` + +The KVM_S390_MEMOP_F_CHECK_ONLY flag can be set to check whether the +corresponding memory access would cause an access exception; however, +no actual access to the data in memory at the destination is performed. +In this case, "buf" is unused and can be NULL. + +In case an access exception occurred during the access (or would occur +in case of KVM_S390_MEMOP_F_CHECK_ONLY), the ioctl returns a positive +error number indicating the type of exception. This exception is also +raised directly at the corresponding VCPU if the flag +KVM_S390_MEMOP_F_INJECT_EXCEPTION is set. + +If the KVM_S390_MEMOP_F_SKEY_PROTECTION flag is set, storage key +protection is also in effect and may cause exceptions if accesses are +prohibited given the access key passed in "key". +KVM_S390_MEMOP_F_SKEY_PROTECTION is available if KVM_CAP_S390_MEM_OP_EXTENSION +is > 0. + +Absolute read/write: +^^^^^^^^^^^^^^^^^^^^ + +Access absolute memory. This operation is intended to be used with the +KVM_S390_MEMOP_F_SKEY_PROTECTION flag, to allow accessing memory and performing +the checks required for storage key protection as one operation (as opposed to +user space getting the storage keys, performing the checks, and accessing +memory thereafter, which could lead to a delay between check and access). +Absolute accesses are permitted for the VM ioctl if KVM_CAP_S390_MEM_OP_EXTENSION +is > 0. +Currently absolute accesses are not permitted for VCPU ioctls. +Absolute accesses are permitted for non-protected guests only. + +Supported flags: + * ``KVM_S390_MEMOP_F_CHECK_ONLY`` + * ``KVM_S390_MEMOP_F_SKEY_PROTECTION`` + +The semantics of the flags are as for logical accesses. + +SIDA read/write: +^^^^^^^^^^^^^^^^ + +Access the secure instruction data area which contains memory operands necessary +for instruction emulation for protected guests. +SIDA accesses are available if the KVM_CAP_S390_PROTECTED capability is available. +SIDA accesses are permitted for the VCPU ioctl only. +SIDA accesses are permitted for protected guests only. -The "reserved" field is meant for future extensions. It is not used by -KVM with the currently defined set of flags. +No flags are supported. 
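A minimal sketch of the CHECK_ONLY semantics, assuming an existing vcpu_fd and a guest virtual address gva:

	/* ask whether a one-byte store at gva would fault, without storing */
	struct kvm_s390_mem_op probe = {
		.gaddr = gva,
		.size  = 1,
		.op    = KVM_S390_MEMOP_LOGICAL_WRITE,
		.flags = KVM_S390_MEMOP_F_CHECK_ONLY,
		.ar    = 0,
		.buf   = 0,	/* unused with CHECK_ONLY, may be NULL */
	};
	int pgm = ioctl(vcpu_fd, KVM_S390_MEM_OP, &probe);
	/* 0: the store would succeed; > 0: the exception it would raise */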
4.90 KVM_S390_GET_SKEYS ----------------------- diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 08756eeea065..dbc550bbd9fa 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -567,7 +567,7 @@ struct kvm_s390_mem_op { __u8 key; /* access key, ignored if flag unset */ }; __u32 sida_offset; /* offset into the sida */ - __u8 reserved[32]; /* should be set to 0 */ + __u8 reserved[32]; /* ignored */ }; }; /* types for kvm_s390_mem_op->op */ -- cgit v1.2.3-71-gd317 From 761b9b366cec0c81a1cd80930f00611d86521d1b Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 31 Jan 2022 16:54:53 +0000 Subject: elf: Introduce the ARM MTE ELF segment type Memory tags will be dumped in the core file as segments with their own type. Discussions with the binutils and the generic ABI community settled on using new definitions in the PT_*PROC space (and to be documented in the processor-specific ABIs). Introduce PT_ARM_MEMTAG_MTE as (PT_LOPROC + 0x1). Not included in this patch since there is no upstream support but the CHERI/BSD community will also reserve: #define PT_ARM_MEMTAG_CHERI (PT_LOPROC + 0x2) #define PT_RISCV_MEMTAG_CHERI (PT_LOPROC + 0x3) Signed-off-by: Catalin Marinas Acked-by: Luis Machado Link: https://lore.kernel.org/r/20220131165456.2160675-3-catalin.marinas@arm.com Signed-off-by: Will Deacon --- include/uapi/linux/elf.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 61bf4774b8f2..fe8e5b74cb39 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -40,6 +40,9 @@ typedef __s64 Elf64_Sxword; #define PT_GNU_STACK (PT_LOOS + 0x474e551) +/* ARM MTE memory tag segment type */ +#define PT_ARM_MEMTAG_MTE (PT_LOPROC + 0x1) + /* * Extended Numbering * -- cgit v1.2.3-71-gd317 From 5cd5a8a3e2fb11b1c8a09f062c44c1e228ef987a Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 14 Feb 2022 17:29:53 +0100 Subject: cfg80211: Add data structures to capture EHT capabilities And advertise EHT capabilities to user space when supported. Signed-off-by: Ilan Peer Link: https://lore.kernel.org/r/20220214173004.6fb70658529f.I2413a37c8f7d2d6d638038a3d95360a3fce0114d@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 63 ++++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/nl80211.h | 12 +++++++++ net/wireless/nl80211.c | 27 +++++++++++++++++++ 3 files changed, 102 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index f6db085ff3df..7bec15f605be 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -360,6 +360,48 @@ struct ieee80211_sta_he_cap { u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN]; }; +/** + * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS + * + * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS + * and NSS Set field" + * + * @only_20mhz: MCS/NSS support for 20 MHz-only STA. 
+ * @bw._80: MCS/NSS support for BW <= 80 MHz + * @bw._160: MCS/NSS support for BW = 160 MHz + * @bw._320: MCS/NSS support for BW = 320 MHz + */ +struct ieee80211_eht_mcs_nss_supp { + union { + struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz; + struct { + struct ieee80211_eht_mcs_nss_supp_bw _80; + struct ieee80211_eht_mcs_nss_supp_bw _160; + struct ieee80211_eht_mcs_nss_supp_bw _320; + } __packed bw; + } __packed; +} __packed; + +#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32 + +/** + * struct ieee80211_sta_eht_cap - STA's EHT capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11be EHT capabilities for a STA. + * + * @has_eht: true iff EHT data is valid. + * @eht_cap_elem: Fixed portion of the eht capabilities element. + * @eht_mcs_nss_supp: The supported NSS/MCS combinations. + * @eht_ppe_thres: Holds the PPE Thresholds data. + */ +struct ieee80211_sta_eht_cap { + bool has_eht; + struct ieee80211_eht_cap_elem_fixed eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp; + u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN]; +}; + /** * struct ieee80211_sband_iftype_data - sband data per interface type * @@ -379,6 +421,7 @@ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; struct { const u8 *data; unsigned int len; @@ -561,6 +604,26 @@ ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband, return data->he_6ghz_capa.capa; } +/** + * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype + * @sband: the sband to search for the iftype on + * @iftype: enum nl80211_iftype + * + * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found + */ +static inline const struct ieee80211_sta_eht_cap * +ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband, + enum nl80211_iftype iftype) +{ + const struct ieee80211_sband_iftype_data *data = + ieee80211_get_sband_iftype_data(sband, iftype); + + if (data && data->eht_cap.has_eht) + return &data->eht_cap; + + return NULL; +} + /** * wiphy_read_of_freq_limits - read frequency limits from device tree * diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 195a238a322e..d305a8b8c536 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3766,6 +3766,14 @@ enum nl80211_mpath_info { * given for all 6 GHz band channels * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are * advertised on this band/for this iftype (binary) + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC: EHT MAC capabilities as in EHT + * capabilities element + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY: EHT PHY capabilities as in EHT + * capabilities element + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET: EHT supported NSS/MCS as in EHT + * capabilities element + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as + * defined in EHT capabilities element * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined */ @@ -3779,6 +3787,10 @@ enum nl80211_band_iftype_attr { NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE, /* keep last */
__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5c49d7c4bcae..2dfd45f35486 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1737,6 +1737,7 @@ nl80211_send_iftype_data(struct sk_buff *msg, const struct ieee80211_sband_iftype_data *iftdata) { const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap; + const struct ieee80211_sta_eht_cap *eht_cap = &iftdata->eht_cap; if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES, iftdata->types_mask)) @@ -1757,6 +1758,32 @@ nl80211_send_iftype_data(struct sk_buff *msg, return -ENOBUFS; } + if (eht_cap->has_eht && he_cap->has_he) { + u8 mcs_nss_size, ppe_thresh_size; + u16 ppe_thres_hdr; + + mcs_nss_size = + ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, + &eht_cap->eht_cap_elem); + + ppe_thres_hdr = get_unaligned_le16(&eht_cap->eht_ppe_thres[0]); + ppe_thresh_size = + ieee80211_eht_ppe_size(ppe_thres_hdr, + eht_cap->eht_cap_elem.phy_cap_info); + + if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC, + sizeof(eht_cap->eht_cap_elem.mac_cap_info), + eht_cap->eht_cap_elem.mac_cap_info) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY, + sizeof(eht_cap->eht_cap_elem.phy_cap_info), + eht_cap->eht_cap_elem.phy_cap_info) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET, + mcs_nss_size, &eht_cap->eht_mcs_nss_supp) || + nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE, + ppe_thresh_size, eht_cap->eht_ppe_thres)) + return -ENOBUFS; + } + if (sband->band == NL80211_BAND_6GHZ && nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, sizeof(iftdata->he_6ghz_capa), -- cgit v1.2.3-71-gd317 From 3743bec6120ae0748cb3fda6ff80a690117ef1f3 Mon Sep 17 00:00:00 2001 From: Jia Ding Date: Mon, 14 Feb 2022 17:29:54 +0100 Subject: cfg80211: Add support for EHT 320 MHz channel width Add 320 MHz support in the channel def and center frequency validation with compatible check. Signed-off-by: Jia Ding Co-authored-by: Karthikeyan Periyasamy Signed-off-by: Karthikeyan Periyasamy Co-authored-by: Muna Sinada Signed-off-by: Muna Sinada Co-authored-by: Veerendranath Jakkam Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/1640163883-12696-5-git-send-email-quic_vjakkam@quicinc.com Link: https://lore.kernel.org/r/20220214163009.175289-1-johannes@sipsolutions.net Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 2 + include/net/cfg80211.h | 7 ++- include/uapi/linux/nl80211.h | 3 ++ net/wireless/chan.c | 91 ++++++++++++++++++++++++++++++++--- net/wireless/nl80211.c | 2 + 5 files changed, 97 insertions(+), 8 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index defb7e36c1f2..0054c234fb9a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -836,6 +836,8 @@ static int hwsim_get_chanwidth(enum nl80211_chan_width bw) case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 160; + case NL80211_CHAN_WIDTH_320: + return 320; case NL80211_CHAN_WIDTH_5: return 5; case NL80211_CHAN_WIDTH_10: diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7bec15f605be..a27f2d699dac 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -109,7 +109,11 @@ struct wiphy; * on this channel. * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted * on this channel. 
- * + * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band, + * this flag indicates that a 320 MHz channel cannot use this + * channel as the control or any of the secondary channels. + * This may be due to the driver or due to regulatory bandwidth + * restrictions. */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -131,6 +135,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_4MHZ = 1<<16, IEEE80211_CHAN_8MHZ = 1<<17, IEEE80211_CHAN_16MHZ = 1<<18, + IEEE80211_CHAN_NO_320MHZ = 1<<19, }; #define IEEE80211_CHAN_NO_HT40 \ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d305a8b8c536..9e05973f3f56 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4684,6 +4684,8 @@ enum nl80211_key_mode { * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel + * @NL80211_CHAN_WIDTH_320: 320 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 + * attribute must be provided as well */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, @@ -4699,6 +4701,7 @@ enum nl80211_chan_width { NL80211_CHAN_WIDTH_4, NL80211_CHAN_WIDTH_8, NL80211_CHAN_WIDTH_16, + NL80211_CHAN_WIDTH_320, }; /** diff --git a/net/wireless/chan.c b/net/wireless/chan.c index eb822052d344..8b7fb4a9e07b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -181,6 +181,9 @@ static int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width) case NL80211_CHAN_WIDTH_160: mhz = 160; break; + case NL80211_CHAN_WIDTH_320: + mhz = 320; + break; default: WARN_ON_ONCE(1); return -1; @@ -271,6 +274,17 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_16: /* all checked above */ break; + case NL80211_CHAN_WIDTH_320: + if (chandef->center_freq1 == control_freq + 150 || + chandef->center_freq1 == control_freq + 130 || + chandef->center_freq1 == control_freq + 110 || + chandef->center_freq1 == control_freq + 90 || + chandef->center_freq1 == control_freq - 90 || + chandef->center_freq1 == control_freq - 110 || + chandef->center_freq1 == control_freq - 130 || + chandef->center_freq1 == control_freq - 150) + break; + fallthrough; case NL80211_CHAN_WIDTH_160: if (chandef->center_freq1 == control_freq + 70 || chandef->center_freq1 == control_freq + 50 || @@ -307,7 +321,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) EXPORT_SYMBOL(cfg80211_chandef_valid); static void chandef_primary_freqs(const struct cfg80211_chan_def *c, - u32 *pri40, u32 *pri80) + u32 *pri40, u32 *pri80, u32 *pri160) { int tmp; @@ -315,9 +329,11 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c, case NL80211_CHAN_WIDTH_40: *pri40 = c->center_freq1; *pri80 = 0; + *pri160 = 0; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: + *pri160 = 0; *pri80 = c->center_freq1; /* n_P20 */ tmp = (30 + c->chan->center_freq - c->center_freq1)/20; @@ -327,6 +343,7 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c, *pri40 = c->center_freq1 - 20 + 40 * tmp; break; case NL80211_CHAN_WIDTH_160: + *pri160 = c->center_freq1; /* n_P20 */ tmp = (70 + c->chan->center_freq - c->center_freq1)/20; /* n_P40 */ @@ -337,6 +354,20 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c, tmp /= 2; *pri80 = c->center_freq1 - 40 + 80 * tmp; break; + case NL80211_CHAN_WIDTH_320: + /* n_P20 */ + tmp = (150 + c->chan->center_freq - c->center_freq1) / 20; + /* n_P40 */ + tmp /= 2; + /* freq_P40 */ + *pri40 = 
c->center_freq1 - 140 + 40 * tmp; + /* n_P80 */ + tmp /= 2; + *pri80 = c->center_freq1 - 120 + 80 * tmp; + /* n_P160 */ + tmp /= 2; + *pri160 = c->center_freq1 - 80 + 160 * tmp; + break; default: WARN_ON_ONCE(1); } @@ -346,7 +377,7 @@ const struct cfg80211_chan_def * cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, const struct cfg80211_chan_def *c2) { - u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80; + u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80, c1_pri160, c2_pri160; /* If they are identical, return */ if (cfg80211_chandef_identical(c1, c2)) @@ -381,14 +412,31 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, c2->width == NL80211_CHAN_WIDTH_20) return c1; - chandef_primary_freqs(c1, &c1_pri40, &c1_pri80); - chandef_primary_freqs(c2, &c2_pri40, &c2_pri80); + chandef_primary_freqs(c1, &c1_pri40, &c1_pri80, &c1_pri160); + chandef_primary_freqs(c2, &c2_pri40, &c2_pri80, &c2_pri160); if (c1_pri40 != c2_pri40) return NULL; - WARN_ON(!c1_pri80 && !c2_pri80); - if (c1_pri80 && c2_pri80 && c1_pri80 != c2_pri80) + if (c1->width == NL80211_CHAN_WIDTH_40) + return c2; + + if (c2->width == NL80211_CHAN_WIDTH_40) + return c1; + + if (c1_pri80 != c2_pri80) + return NULL; + + if (c1->width == NL80211_CHAN_WIDTH_80 && + c2->width > NL80211_CHAN_WIDTH_80) + return c2; + + if (c2->width == NL80211_CHAN_WIDTH_80 && + c1->width > NL80211_CHAN_WIDTH_80) + return c1; + + WARN_ON(!c1_pri160 && !c2_pri160); + if (c1_pri160 && c2_pri160 && c1_pri160 != c2_pri160) return NULL; if (c1->width > c2->width) @@ -960,7 +1008,10 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, struct ieee80211_sta_vht_cap *vht_cap; struct ieee80211_edmg *edmg_cap; u32 width, control_freq, cap; - bool ext_nss_cap, support_80_80 = false; + bool ext_nss_cap, support_80_80 = false, support_320 = false; + const struct ieee80211_sband_iftype_data *iftd; + struct ieee80211_supported_band *sband; + int i; if (WARN_ON(!cfg80211_chandef_valid(chandef))) return false; @@ -1062,6 +1113,32 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))) return false; break; + case NL80211_CHAN_WIDTH_320: + prohibited_flags |= IEEE80211_CHAN_NO_320MHZ; + width = 320; + + if (chandef->chan->band != NL80211_BAND_6GHZ) + return false; + + sband = wiphy->bands[NL80211_BAND_6GHZ]; + if (!sband) + return false; + + for (i = 0; i < sband->n_iftype_data; i++) { + iftd = &sband->iftype_data[i]; + if (!iftd->eht_cap.has_eht) + continue; + + if (iftd->eht_cap.eht_cap_elem.phy_cap_info[0] & + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) { + support_320 = true; + break; + } + } + + if (!support_320) + return false; + break; default: WARN_ON_ONCE(1); return false; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2dfd45f35486..f93b9686fb41 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -10589,6 +10589,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) NL80211_EXT_FEATURE_VHT_IBSS)) return -EINVAL; break; + case NL80211_CHAN_WIDTH_320: + return -EINVAL; default: return -EINVAL; } -- cgit v1.2.3-71-gd317 From cfb14110acf87b4db62e07ba08a80429f1749f40 Mon Sep 17 00:00:00 2001 From: Veerendranath Jakkam Date: Mon, 14 Feb 2022 17:29:55 +0100 Subject: nl80211: add EHT MCS support Add support for reporting and calculating EHT bitrates. 
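The diff below adds the rate_info fields and nl80211 attributes this relies on; a driver reporting, say, a 320 MHz EHT rate might then fill them as in this sketch (values are illustrative):

	struct rate_info ri = {
		.flags  = RATE_INFO_FLAGS_EHT_MCS,
		.bw     = RATE_INFO_BW_320,
		.mcs    = 13,				/* EHT MCS 0..15 */
		.nss    = 2,
		.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8,
	};
	/* nl80211_put_sta_rate() then emits NL80211_RATE_INFO_EHT_MCS,
	 * _EHT_NSS, _EHT_GI and NL80211_RATE_INFO_320_MHZ_WIDTH */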
Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/1640163883-12696-7-git-send-email-quic_vjakkam@quicinc.com Link: https://lore.kernel.org/r/20220214163009.175289-2-johannes@sipsolutions.net Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 11 ++++ include/uapi/linux/nl80211.h | 62 ++++++++++++++++++++ net/wireless/nl80211.c | 19 +++++++ net/wireless/util.c | 131 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 223 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a27f2d699dac..f35ffd81d213 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1595,6 +1595,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_HE_MCS: HE MCS information * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS + * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), @@ -1604,6 +1605,7 @@ enum rate_info_flags { RATE_INFO_FLAGS_HE_MCS = BIT(4), RATE_INFO_FLAGS_EDMG = BIT(5), RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6), + RATE_INFO_FLAGS_EHT_MCS = BIT(7), }; /** @@ -1618,6 +1620,8 @@ enum rate_info_flags { * @RATE_INFO_BW_80: 80 MHz bandwidth * @RATE_INFO_BW_160: 160 MHz bandwidth * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation + * @RATE_INFO_BW_320: 320 MHz bandwidth + * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation */ enum rate_info_bw { RATE_INFO_BW_20 = 0, @@ -1627,6 +1631,8 @@ enum rate_info_bw { RATE_INFO_BW_80, RATE_INFO_BW_160, RATE_INFO_BW_HE_RU, + RATE_INFO_BW_320, + RATE_INFO_BW_EHT_RU, }; /** @@ -1644,6 +1650,9 @@ enum rate_info_bw { * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, * only valid if bw is %RATE_INFO_BW_HE_RU) * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4) + * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi) + * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc, + * only valid if bw is %RATE_INFO_BW_EHT_RU) */ struct rate_info { u8 flags; @@ -1655,6 +1664,8 @@ struct rate_info { u8 he_dcm; u8 he_ru_alloc; u8 n_bonded_ch; + u8 eht_gi; + u8 eht_ru_alloc; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 9e05973f3f56..d0ba70ea5d04 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3392,6 +3392,56 @@ enum nl80211_he_ru_alloc { NL80211_RATE_INFO_HE_RU_ALLOC_2x996, }; +/** + * enum nl80211_eht_gi - EHT guard interval + * @NL80211_RATE_INFO_EHT_GI_0_8: 0.8 usec + * @NL80211_RATE_INFO_EHT_GI_1_6: 1.6 usec + * @NL80211_RATE_INFO_EHT_GI_3_2: 3.2 usec + */ +enum nl80211_eht_gi { + NL80211_RATE_INFO_EHT_GI_0_8, + NL80211_RATE_INFO_EHT_GI_1_6, + NL80211_RATE_INFO_EHT_GI_3_2, +}; + +/** + * enum nl80211_eht_ru_alloc - EHT RU allocation values + * @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_52P26: 52+26-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_106: 106-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_106P26: 106+26 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_242: 242-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_484: 484-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_484P242: 484+242 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_996: 996-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484: 996+484 tone RU allocation + * 
@NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242: 996+484+242 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996: 2x996-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484: 2x996+484 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996: 3x996-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484: 3x996+484 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_4x996: 4x996-tone RU allocation + */ +enum nl80211_eht_ru_alloc { + NL80211_RATE_INFO_EHT_RU_ALLOC_26, + NL80211_RATE_INFO_EHT_RU_ALLOC_52, + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26, + NL80211_RATE_INFO_EHT_RU_ALLOC_106, + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26, + NL80211_RATE_INFO_EHT_RU_ALLOC_242, + NL80211_RATE_INFO_EHT_RU_ALLOC_484, + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242, + NL80211_RATE_INFO_EHT_RU_ALLOC_996, + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484, + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242, + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996, + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484, + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996, + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484, + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996, +}; + /** * enum nl80211_rate_info - bitrate information * @@ -3431,6 +3481,13 @@ enum nl80211_he_ru_alloc { * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1) * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc) + * @NL80211_RATE_INFO_320_MHZ_WIDTH: 320 MHz bitrate + * @NL80211_RATE_INFO_EHT_MCS: EHT MCS index (u8, 0-15) + * @NL80211_RATE_INFO_EHT_NSS: EHT NSS value (u8, 1-8) + * @NL80211_RATE_INFO_EHT_GI: EHT guard interval identifier + * (u8, see &enum nl80211_eht_gi) + * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then + * non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc) * @__NL80211_RATE_INFO_AFTER_LAST: internal use */ enum nl80211_rate_info { @@ -3452,6 +3509,11 @@ enum nl80211_rate_info { NL80211_RATE_INFO_HE_GI, NL80211_RATE_INFO_HE_DCM, NL80211_RATE_INFO_HE_RU_ALLOC, + NL80211_RATE_INFO_320_MHZ_WIDTH, + NL80211_RATE_INFO_EHT_MCS, + NL80211_RATE_INFO_EHT_NSS, + NL80211_RATE_INFO_EHT_GI, + NL80211_RATE_INFO_EHT_RU_ALLOC, /* keep last */ __NL80211_RATE_INFO_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f93b9686fb41..a267e838f79c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5954,6 +5954,14 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) case RATE_INFO_BW_HE_RU: rate_flg = 0; WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS)); + break; + case RATE_INFO_BW_320: + rate_flg = NL80211_RATE_INFO_320_MHZ_WIDTH; + break; + case RATE_INFO_BW_EHT_RU: + rate_flg = 0; + WARN_ON(!(info->flags & RATE_INFO_FLAGS_EHT_MCS)); + break; } if (rate_flg && nla_put_flag(msg, rate_flg)) @@ -5986,6 +5994,17 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC, info->he_ru_alloc)) return false; + } else if (info->flags & RATE_INFO_FLAGS_EHT_MCS) { + if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_MCS, info->mcs)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_NSS, info->nss)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_GI, info->eht_gi)) + return false; + if (info->bw == RATE_INFO_BW_EHT_RU && + nla_put_u8(msg, NL80211_RATE_INFO_EHT_RU_ALLOC, + info->eht_ru_alloc)) + return false; } nla_nest_end(msg, rate); diff --git a/net/wireless/util.c b/net/wireless/util.c index e02f1702806e..2eda097aee7f 100644 --- a/net/wireless/util.c +++ 
b/net/wireless/util.c @@ -1430,6 +1430,135 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) return result / 10000; } +static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate) +{ +#define SCALE 6144 + static const u32 mcs_divisors[16] = { + 102399, /* 16.666666... */ + 51201, /* 8.333333... */ + 34134, /* 5.555555... */ + 25599, /* 4.166666... */ + 17067, /* 2.777777... */ + 12801, /* 2.083333... */ + 11769, /* 1.851851... */ + 10239, /* 1.666666... */ + 8532, /* 1.388888... */ + 7680, /* 1.250000... */ + 6828, /* 1.111111... */ + 6144, /* 1.000000... */ + 5690, /* 0.926106... */ + 5120, /* 0.833333... */ + 409600, /* 66.666666... */ + 204800, /* 33.333333... */ + }; + static const u32 rates_996[3] = { 480388888, 453700000, 408333333 }; + static const u32 rates_484[3] = { 229411111, 216666666, 195000000 }; + static const u32 rates_242[3] = { 114711111, 108333333, 97500000 }; + static const u32 rates_106[3] = { 40000000, 37777777, 34000000 }; + static const u32 rates_52[3] = { 18820000, 17777777, 16000000 }; + static const u32 rates_26[3] = { 9411111, 8888888, 8000000 }; + u64 tmp; + u32 result; + + if (WARN_ON_ONCE(rate->mcs > 15)) + return 0; + if (WARN_ON_ONCE(rate->eht_gi > NL80211_RATE_INFO_EHT_GI_3_2)) + return 0; + if (WARN_ON_ONCE(rate->eht_ru_alloc > + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) + return 0; + if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) + return 0; + + /* Bandwidth checks for MCS 14 */ + if (rate->mcs == 14) { + if ((rate->bw != RATE_INFO_BW_EHT_RU && + rate->bw != RATE_INFO_BW_80 && + rate->bw != RATE_INFO_BW_160 && + rate->bw != RATE_INFO_BW_320) || + (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_996 && + rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 && + rate->eht_ru_alloc != NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) { + WARN(1, "invalid EHT BW for MCS 14: bw:%d, ru:%d\n", + rate->bw, rate->eht_ru_alloc); + return 0; + } + } + + if (rate->bw == RATE_INFO_BW_320 || + (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_4x996)) + result = 4 * rates_996[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484) + result = 3 * rates_996[rate->eht_gi] + rates_484[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_3x996) + result = 3 * rates_996[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484) + result = 2 * rates_996[rate->eht_gi] + rates_484[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_160 || + (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_2x996)) + result = 2 * rates_996[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242) + result = rates_996[rate->eht_gi] + rates_484[rate->eht_gi] + + rates_242[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996P484) + result = rates_996[rate->eht_gi] + rates_484[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_80 || + (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_996)) + result = rates_996[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_484P242) + result = rates_484[rate->eht_gi] + 
rates_242[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_40 || + (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_484)) + result = rates_484[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_20 || + (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_242)) + result = rates_242[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_106P26) + result = rates_106[rate->eht_gi] + rates_26[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_106) + result = rates_106[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_52P26) + result = rates_52[rate->eht_gi] + rates_26[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_52) + result = rates_52[rate->eht_gi]; + else if (rate->bw == RATE_INFO_BW_EHT_RU && + rate->eht_ru_alloc == NL80211_RATE_INFO_EHT_RU_ALLOC_26) + result = rates_26[rate->eht_gi]; + else { + WARN(1, "invalid EHT MCS: bw:%d, ru:%d\n", + rate->bw, rate->eht_ru_alloc); + return 0; + } + + /* now scale to the appropriate MCS */ + tmp = result; + tmp *= SCALE; + do_div(tmp, mcs_divisors[rate->mcs]); + result = tmp; + + /* and take NSS */ + result = (result * rate->nss) / 8; + + return result / 10000; +} + u32 cfg80211_calculate_bitrate(struct rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) @@ -1444,6 +1573,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate) return cfg80211_calculate_bitrate_vht(rate); if (rate->flags & RATE_INFO_FLAGS_HE_MCS) return cfg80211_calculate_bitrate_he(rate); + if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) + return cfg80211_calculate_bitrate_eht(rate); return rate->legacy; } -- cgit v1.2.3-71-gd317 From c2b3d7699fb0ce66538b829af43970acc2f89060 Mon Sep 17 00:00:00 2001 From: Sriram R Date: Mon, 14 Feb 2022 17:29:56 +0100 Subject: nl80211: add support for 320MHz channel limitation Add support to advertise drivers or regulatory limitations on 320 MHz channels to userspace. Signed-off-by: Sriram R Co-authored-by: Karthikeyan Periyasamy Signed-off-by: Karthikeyan Periyasamy Co-authored-by: Veerendranath Jakkam Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/1640163883-12696-6-git-send-email-quic_vjakkam@quicinc.com Link: https://lore.kernel.org/r/20220214163009.175289-3-johannes@sipsolutions.net Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 5 +++++ net/wireless/nl80211.c | 3 +++ net/wireless/reg.c | 6 ++++++ 3 files changed, 14 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d0ba70ea5d04..6a338dafcd07 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3997,6 +3997,8 @@ enum nl80211_wmm_rule { * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed * on this channel in current regulatory domain. 
+ * @NL80211_FREQUENCY_ATTR_NO_320MHZ: any 320 MHz channel using this channel + * as the primary or any of the secondary channels isn't possible * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4033,6 +4035,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_4MHZ, NL80211_FREQUENCY_ATTR_8MHZ, NL80211_FREQUENCY_ATTR_16MHZ, + NL80211_FREQUENCY_ATTR_NO_320MHZ, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4231,6 +4234,7 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed * @NL80211_RRF_NO_HE: HE operation not allowed + * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4249,6 +4253,7 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_80MHZ = 1<<15, NL80211_RRF_NO_160MHZ = 1<<16, NL80211_RRF_NO_HE = 1<<17, + NL80211_RRF_NO_320MHZ = 1<<18, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a267e838f79c..7edf0bb4e90e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1156,6 +1156,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_16MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_16MHZ)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_NO_320MHZ) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_320MHZ)) + goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ec25924a1c26..c76cd973f06e 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1238,6 +1238,8 @@ unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, { unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule); + if (rule->flags & NL80211_RRF_NO_320MHZ) + bw = min_t(unsigned int, bw, MHZ_TO_KHZ(160)); if (rule->flags & NL80211_RRF_NO_160MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80)); if (rule->flags & NL80211_RRF_NO_80MHZ) @@ -1611,6 +1613,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_160MHZ; if (rd_flags & NL80211_RRF_NO_HE) channel_flags |= IEEE80211_CHAN_NO_HE; + if (rd_flags & NL80211_RRF_NO_320MHZ) + channel_flags |= IEEE80211_CHAN_NO_320MHZ; return channel_flags; } @@ -1773,6 +1777,8 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd bw_flags |= IEEE80211_CHAN_NO_80MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(160)) bw_flags |= IEEE80211_CHAN_NO_160MHZ; + if (max_bandwidth_khz < MHZ_TO_KHZ(320)) + bw_flags |= IEEE80211_CHAN_NO_320MHZ; } return bw_flags; } -- cgit v1.2.3-71-gd317 From 31846b657857e6a73d982604f36a34710d98902c Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 14 Feb 2022 17:29:57 +0100 Subject: cfg80211: add NO-EHT flag to regulatory This may be necessary in some cases, add a flag and propagate it, just like the NO-HE that already exists. 
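For illustration only (this helper is not part of the patch), consumers of the new flag would mask it the same way as the other per-channel prohibitions:

	/* Hedged sketch: gate EHT operation on the new channel flag. */
	static bool chan_allows_eht(const struct ieee80211_channel *chan)
	{
		return !(chan->flags & (IEEE80211_CHAN_DISABLED |
					IEEE80211_CHAN_NO_EHT));
	}
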
Signed-off-by: Ilan Peer [split off from a combined 320/no-EHT patch] Link: https://lore.kernel.org/r/20220214173004.dbb85a7b86bb.Ifc1e2daac51c1cc5f895ccfb79faf5eaec3950ec@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 3 +++ net/wireless/nl80211.c | 3 +++ 3 files changed, 8 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index f35ffd81d213..5cfc483dece1 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -114,6 +114,7 @@ struct wiphy; * channel as the control or any of the secondary channels. * This may be due to the driver or due to regulatory bandwidth * restrictions. + * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel. */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -136,6 +137,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_8MHZ = 1<<17, IEEE80211_CHAN_16MHZ = 1<<18, IEEE80211_CHAN_NO_320MHZ = 1<<19, + IEEE80211_CHAN_NO_EHT = 1<<20, }; #define IEEE80211_CHAN_NO_HT40 \ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6a338dafcd07..baf6433c0119 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3999,6 +3999,8 @@ enum nl80211_wmm_rule { * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_NO_320MHZ: any 320 MHz channel using this channel * as the primary or any of the secondary channels isn't possible + * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel + * in current regulatory domain. * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4036,6 +4038,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_8MHZ, NL80211_FREQUENCY_ATTR_16MHZ, NL80211_FREQUENCY_ATTR_NO_320MHZ, + NL80211_FREQUENCY_ATTR_NO_EHT, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7edf0bb4e90e..3e6809f24220 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1159,6 +1159,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_NO_320MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_320MHZ)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_NO_EHT) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_EHT)) + goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, -- cgit v1.2.3-71-gd317 From ea05fd3581d32a0f1098657005c7a9b763798fe8 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 14 Feb 2022 17:29:58 +0100 Subject: cfg80211: Support configuration of station EHT capabilities Add attributes and some code bits to support userspace passing in EHT capabilities of stations. 
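As a hedged userspace sketch (libnl-3 style; the wrapper name and capability buffer are mine), the new attribute would be attached to an NL80211_CMD_NEW_STATION request like any other binary attribute:

	#include <netlink/attr.h>
	#include <linux/nl80211.h>

	/* len must fall within NL80211_EHT_{MIN,MAX}_CAPABILITY_LEN (13..51). */
	static int put_eht_capa(struct nl_msg *msg, const void *capa, int len)
	{
		return nla_put(msg, NL80211_ATTR_EHT_CAPABILITY, len, capa);
	}
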
Signed-off-by: Ilan Peer Link: https://lore.kernel.org/r/20220214173004.ecf0b3ff9627.Icb4a5f2ec7b41d9008ac4cfc16c59baeb84793d3@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++++ include/uapi/linux/nl80211.h | 10 +++++++++- net/wireless/nl80211.c | 37 +++++++++++++++++++++++++++++++++---- 3 files changed, 46 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5cfc483dece1..68713388b617 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1487,6 +1487,8 @@ struct sta_txpwr { * @airtime_weight: airtime scheduler weight for this station * @txpwr: transmit power for an associated station * @he_6ghz_capa: HE 6 GHz Band capabilities of station + * @eht_capa: EHT capabilities of station + * @eht_capa_len: the length of the EHT capabilities */ struct station_parameters { const u8 *supported_rates; @@ -1520,6 +1522,8 @@ struct station_parameters { u16 airtime_weight; struct sta_txpwr txpwr; const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_eht_cap_elem *eht_capa; + u8 eht_capa_len; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index baf6433c0119..98ed52663d6b 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2659,6 +2659,10 @@ enum nl80211_commands { * enumerated in &enum nl80211_ap_settings_flags. This attribute shall be * used with %NL80211_CMD_START_AP request. * + * @NL80211_ATTR_EHT_CAPABILITY: EHT Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION). Can be set + * only if %NL80211_STA_FLAG_WME is set. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3169,6 +3173,8 @@ enum nl80211_attrs { NL80211_ATTR_AP_SETTINGS_FLAGS, + NL80211_ATTR_EHT_CAPABILITY, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3224,6 +3230,8 @@ enum nl80211_attrs { #define NL80211_HE_MAX_CAPABILITY_LEN 54 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 +#define NL80211_EHT_MIN_CAPABILITY_LEN 13 +#define NL80211_EHT_MAX_CAPABILITY_LEN 51 #define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3e6809f24220..7543c73a3f1d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include @@ -786,6 +786,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED }, [NL80211_ATTR_RADAR_BACKGROUND] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_SETTINGS_FLAGS] = { .type = NLA_U32 }, + [NL80211_ATTR_EHT_CAPABILITY] = + NLA_POLICY_RANGE(NLA_BINARY, + NL80211_EHT_MIN_CAPABILITY_LEN, + NL80211_EHT_MAX_CAPABILITY_LEN), }; /* policy for the key attributes */ @@ -6425,7 +6429,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, if (params->supported_rates) return -EINVAL; if (params->ext_capab || params->ht_capa || params->vht_capa || - params->he_capa) + params->he_capa || params->eht_capa) return -EINVAL; } @@ -6628,6 +6632,18 @@ static int nl80211_set_station_tdls(struct genl_info *info, nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params->he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + + if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) { + params->eht_capa = + nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); + params->eht_capa_len = + nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); + + if (!ieee80211_eht_capa_size_ok((const u8 *)params->he_capa, + (const u8 *)params->eht_capa, + params->eht_capa_len)) + return -EINVAL; + } } err = nl80211_parse_sta_channel_info(info, params); @@ -6885,6 +6901,18 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); + + if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) { + params.eht_capa = + nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); + params.eht_capa_len = + nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); + + if (!ieee80211_eht_capa_size_ok((const u8 *)params.he_capa, + (const u8 *)params.eht_capa, + params.eht_capa_len)) + return -EINVAL; + } } if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) @@ -6934,8 +6962,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.ht_capa = NULL; params.vht_capa = NULL; - /* HE requires WME */ - if (params.he_capa_len || params.he_6ghz_capa) + /* HE and EHT require WME */ + if (params.he_capa_len || params.he_6ghz_capa || + params.eht_capa_len) return -EINVAL; } -- cgit v1.2.3-71-gd317 From 79aa0367385ceaf5351ea77ea1fb66136739ea9d Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 15 Feb 2022 18:54:38 -0500 Subject: drm/amdkfd: Replace zero-length array with flexible-array 
member MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reference: https://www.kernel.org/doc/html/latest/process/deprecated.html#zero-length-and-one-element-arrays CC: Changcheng Deng Signed-off-by: Felix Kuehling Reviewed-by: Philip Yang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- include/uapi/linux/kfd_ioctl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 6e4268f5e482..baec5a41de3e 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -673,7 +673,7 @@ struct kfd_ioctl_svm_args { __u32 op; __u32 nattr; /* Variable length array of attributes */ - struct kfd_ioctl_svm_attribute attrs[0]; + struct kfd_ioctl_svm_attribute attrs[]; }; /** -- cgit v1.2.3-71-gd317 From 169adc2b6b3c5e86391921117b4ab3aaeb3c6ee1 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 10 Feb 2022 11:11:26 +0900 Subject: android/binder.h: add linux/android/binder(fs).h to UAPI compile-test coverage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit linux/android/binder.h and linux/android/binderfs.h are currently excluded from the UAPI compile-test because of the errors like follows: HDRTEST usr/include/linux/android/binder.h In file included from : ./usr/include/linux/android/binder.h:291:9: error: unknown type name ‘pid_t’ 291 | pid_t sender_pid; | ^~~~~ ./usr/include/linux/android/binder.h:292:9: error: unknown type name ‘uid_t’ 292 | uid_t sender_euid; | ^~~~~ The errors can be fixed by replacing {pid,uid}_t with __kernel_{pid,uid}_t. Then, remove the no-header-test entries from user/include/Makefile. Signed-off-by: Masahiro Yamada Reviewed-by: Arnd Bergmann Signed-off-by: Arnd Bergmann --- include/uapi/linux/android/binder.h | 4 ++-- usr/include/Makefile | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 3246f2c74696..11157fae8a8e 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -288,8 +288,8 @@ struct binder_transaction_data { /* General information about the transaction. 
*/ __u32 flags; - pid_t sender_pid; - uid_t sender_euid; + __kernel_pid_t sender_pid; + __kernel_uid_t sender_euid; binder_size_t data_size; /* number of bytes of data */ binder_size_t offsets_size; /* number of bytes of offsets */ diff --git a/usr/include/Makefile b/usr/include/Makefile index 1aa725a3cbbc..0872877c9457 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -23,8 +23,6 @@ override c_flags = $(UAPI_CFLAGS) -Wp,-MMD,$(depfile) -I$(objtree)/usr/include no-header-test += asm/ucontext.h no-header-test += drm/vmwgfx_drm.h no-header-test += linux/am437x-vpfe.h -no-header-test += linux/android/binder.h -no-header-test += linux/android/binderfs.h no-header-test += linux/coda.h no-header-test += linux/cyclades.h no-header-test += linux/errqueue.h -- cgit v1.2.3-71-gd317 From cbf2820341297b9aed0f846aba35556e94569210 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 10 Feb 2022 11:11:27 +0900 Subject: fsmap.h: add linux/fsmap.h to UAPI compile-test coverage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit linux/fsmap.h is currently excluded from the UAPI compile-test because of the error like follows: HDRTEST usr/include/linux/fsmap.h In file included from : ./usr/include/linux/fsmap.h:72:19: error: unknown type name ‘size_t’ 72 | static __inline__ size_t | ^~~~~~ The error can be fixed by replacing size_t with __kernel_size_t. Then, remove the no-header-test entry from user/include/Makefile. Signed-off-by: Masahiro Yamada Reviewed-by: Arnd Bergmann Signed-off-by: Arnd Bergmann --- include/uapi/linux/fsmap.h | 2 +- usr/include/Makefile | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h index 91fd519a3f7d..c690d17f1d07 100644 --- a/include/uapi/linux/fsmap.h +++ b/include/uapi/linux/fsmap.h @@ -69,7 +69,7 @@ struct fsmap_head { }; /* Size of an fsmap_head with room for nr records. */ -static inline size_t +static inline __kernel_size_t fsmap_sizeof( unsigned int nr) { diff --git a/usr/include/Makefile b/usr/include/Makefile index 0872877c9457..3cd15af3266d 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -26,7 +26,6 @@ no-header-test += linux/am437x-vpfe.h no-header-test += linux/coda.h no-header-test += linux/cyclades.h no-header-test += linux/errqueue.h -no-header-test += linux/fsmap.h no-header-test += linux/hdlc/ioctl.h no-header-test += linux/ivtv.h no-header-test += linux/kexec.h -- cgit v1.2.3-71-gd317 From 8b4bca21c2c0cb3b5adb80985830a81d4e5d7081 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 10 Feb 2022 11:11:28 +0900 Subject: kexec.h: add linux/kexec.h to UAPI compile-test coverage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit linux/kexec.h is currently excluded from the UAPI compile-test because of the errors like follows: HDRTEST usr/include/linux/kexec.h In file included from : ./usr/include/linux/kexec.h:56:9: error: unknown type name ‘size_t’ 56 | size_t bufsz; | ^~~~~~ ./usr/include/linux/kexec.h:58:9: error: unknown type name ‘size_t’ 58 | size_t memsz; | ^~~~~~ The errors can be fixed by replacing size_t with __kernel_size_t. Then, remove the no-header-test entry from user/include/Makefile. 
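The fix follows the same self-containment rule as the rest of this series; as a generic, hypothetical illustration (the struct below is invented, not from the patch):

	/* UAPI headers may only rely on <linux/types.h>: __kernel_size_t is
	 * always available there, while size_t needs a libc header. */
	#include <linux/types.h>

	struct example_uapi_desc {
		const void *buf;
		__kernel_size_t bufsz;	/* was: size_t bufsz */
	};
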
Signed-off-by: Masahiro Yamada Reviewed-by: Arnd Bergmann Signed-off-by: Arnd Bergmann --- include/uapi/linux/kexec.h | 4 ++-- usr/include/Makefile | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 778dc191c265..fb7e2ef60825 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -54,9 +54,9 @@ */ struct kexec_segment { const void *buf; - size_t bufsz; + __kernel_size_t bufsz; const void *mem; - size_t memsz; + __kernel_size_t memsz; }; #endif /* __KERNEL__ */ diff --git a/usr/include/Makefile b/usr/include/Makefile index 3cd15af3266d..0f59020ae5ef 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -28,7 +28,6 @@ no-header-test += linux/cyclades.h no-header-test += linux/errqueue.h no-header-test += linux/hdlc/ioctl.h no-header-test += linux/ivtv.h -no-header-test += linux/kexec.h no-header-test += linux/matroxfb.h no-header-test += linux/omap3isp.h no-header-test += linux/omapfb.h -- cgit v1.2.3-71-gd317 From 2a5c0fdc70cd653741e910e92ffeb2fa7376db07 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 10 Feb 2022 11:11:29 +0900 Subject: reiserfs_xattr.h: add linux/reiserfs_xattr.h to UAPI compile-test coverage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit linux/reiserfs_xattr.h is currently excluded from the UAPI compile-test because of the error like follows: HDRTEST usr/include/linux/reiserfs_xattr.h In file included from : ./usr/include/linux/reiserfs_xattr.h:22:9: error: unknown type name ‘size_t’ 22 | size_t length; | ^~~~~~ The error can be fixed by replacing size_t with __kernel_size_t. Then, remove the no-header-test entry from user/include/Makefile. Signed-off-by: Masahiro Yamada Reviewed-by: Arnd Bergmann Signed-off-by: Arnd Bergmann --- include/uapi/linux/reiserfs_xattr.h | 2 +- usr/include/Makefile | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/reiserfs_xattr.h b/include/uapi/linux/reiserfs_xattr.h index 28f10842f047..503ad018ce5b 100644 --- a/include/uapi/linux/reiserfs_xattr.h +++ b/include/uapi/linux/reiserfs_xattr.h @@ -19,7 +19,7 @@ struct reiserfs_xattr_header { struct reiserfs_security_handle { const char *name; void *value; - size_t length; + __kernel_size_t length; }; #endif /* _LINUX_REISERFS_XATTR_H */ diff --git a/usr/include/Makefile b/usr/include/Makefile index 0f59020ae5ef..7b283d43f00d 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -33,7 +33,6 @@ no-header-test += linux/omap3isp.h no-header-test += linux/omapfb.h no-header-test += linux/patchkey.h no-header-test += linux/phonet.h -no-header-test += linux/reiserfs_xattr.h no-header-test += linux/sctp.h no-header-test += linux/sysctl.h no-header-test += linux/usb/audio.h -- cgit v1.2.3-71-gd317 From d4568fc8525897e683983806f813be1ae9eedaed Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 24 Jan 2022 18:29:52 +0100 Subject: media: omap3isp: Use struct_group() for memcpy() region In preparation for FORTIFY_SOURCE performing compile-time and run-time field bounds checking for memcpy(), memmove(), and memset(), avoid intentionally writing across neighboring fields. Wrap the target region in struct_group(). This additionally fixes a theoretical misalignment of the copy (since the size of "buf" changes between 64-bit and 32-bit, but this is likely never built for 64-bit). 
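As a hedged illustration of the struct_group() pattern (field names invented; the real grouping is in the diff below), the jointly-copied fields gain a name the compiler can bounds-check a memcpy() against:

	struct stats_example {
		__u64 ts;			/* not part of the copy */
		struct_group(frame,		/* copied as one unit */
			__u32 buf_size;
			__u16 frame_number;
		);
	};

	/* FORTIFY_SOURCE checks the copy against sizeof(dst->frame):
	 * memcpy(&dst->frame, &src->frame, sizeof(dst->frame)); */
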
FWIW, I think this code is totally broken on 64-bit (which appears to not be a "real" build configuration): it would either always fail (with an uninitialized data->buf_size) or would cause corruption in userspace due to the copy_to_user() in the call path against an uninitialized data->buf value: omap3isp_stat_request_statistics_time32(...) struct omap3isp_stat_data data64; ... omap3isp_stat_request_statistics(stat, &data64); int omap3isp_stat_request_statistics(struct ispstat *stat, struct omap3isp_stat_data *data) ... buf = isp_stat_buf_get(stat, data); static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, struct omap3isp_stat_data *data) ... if (buf->buf_size > data->buf_size) { ... return ERR_PTR(-EINVAL); } ... rval = copy_to_user(data->buf, buf->virt_addr, buf->buf_size); Regardless, additionally initialize data64 to be zero-filled to avoid undefined behavior. Link: https://lore.kernel.org/lkml/20211215220505.GB21862@embeddedor Cc: Arnd Bergmann Fixes: 378e3f81cb56 ("media: omap3isp: support 64-bit version of omap3isp_stat_data") Cc: stable@vger.kernel.org Reviewed-by: Gustavo A. R. Silva Signed-off-by: Kees Cook Reviewed-by: Laurent Pinchart Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab --- drivers/media/platform/omap3isp/ispstat.c | 5 +++-- include/uapi/linux/omap3isp.h | 21 +++++++++++++-------- 2 files changed, 16 insertions(+), 10 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c index 5b9b57f4d9bf..68cf68dbcace 100644 --- a/drivers/media/platform/omap3isp/ispstat.c +++ b/drivers/media/platform/omap3isp/ispstat.c @@ -512,7 +512,7 @@ int omap3isp_stat_request_statistics(struct ispstat *stat, int omap3isp_stat_request_statistics_time32(struct ispstat *stat, struct omap3isp_stat_data_time32 *data) { - struct omap3isp_stat_data data64; + struct omap3isp_stat_data data64 = { }; int ret; ret = omap3isp_stat_request_statistics(stat, &data64); @@ -521,7 +521,8 @@ int omap3isp_stat_request_statistics_time32(struct ispstat *stat, data->ts.tv_sec = data64.ts.tv_sec; data->ts.tv_usec = data64.ts.tv_usec; - memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts)); + data->buf = (uintptr_t)data64.buf; + memcpy(&data->frame, &data64.frame, sizeof(data->frame)); return 0; } diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h index 87b55755f4ff..d9db7ad43890 100644 --- a/include/uapi/linux/omap3isp.h +++ b/include/uapi/linux/omap3isp.h @@ -162,6 +162,7 @@ struct omap3isp_h3a_aewb_config { * struct omap3isp_stat_data - Statistic data sent to or received from user * @ts: Timestamp of returned framestats. * @buf: Pointer to pass to user. + * @buf_size: Size of buffer. * @frame_number: Frame number of requested stats. * @cur_frame: Current frame number being processed. * @config_counter: Number of the configuration associated with the data. 
@@ -176,10 +177,12 @@ struct omap3isp_stat_data { struct timeval ts; #endif void __user *buf; - __u32 buf_size; - __u16 frame_number; - __u16 cur_frame; - __u16 config_counter; + __struct_group(/* no tag */, frame, /* no attrs */, + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; + ); }; #ifdef __KERNEL__ @@ -189,10 +192,12 @@ struct omap3isp_stat_data_time32 { __s32 tv_usec; } ts; __u32 buf; - __u32 buf_size; - __u16 frame_number; - __u16 cur_frame; - __u16 config_counter; + __struct_group(/* no tag */, frame, /* no attrs */, + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; + ); }; #endif -- cgit v1.2.3-71-gd317 From 6de74d1069b821e96460d0fc2edfc35785db04fb Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Wed, 9 Feb 2022 08:11:10 -0800 Subject: hv_utils: Add comment about max VMbus packet size in VSS driver The VSS driver allocates a VMbus receive buffer significantly larger than sizeof(hv_vss_msg), with no explanation. To help prevent future mistakes, add a #define and comment about why this is done. No functional change. Signed-off-by: Michael Kelley Link: https://lore.kernel.org/r/1644423070-75125-1-git-send-email-mikelley@microsoft.com Signed-off-by: Wei Liu --- drivers/hv/hv_snapshot.c | 7 +++++-- include/uapi/linux/hyperv.h | 11 +++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index 6018b9d1b1fb..0d2184be1691 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -31,6 +31,9 @@ static const int fw_versions[] = { UTIL_FW_VERSION }; +/* See comment with struct hv_vss_msg regarding the max VMbus packet size */ +#define VSS_MAX_PKT_SIZE (HV_HYP_PAGE_SIZE * 2) + /* * Timeout values are based on expecations from host */ @@ -298,7 +301,7 @@ void hv_vss_onchannelcallback(void *context) if (vss_transaction.state > HVUTIL_READY) return; - if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) { + if (vmbus_recvpacket(channel, recv_buffer, VSS_MAX_PKT_SIZE, &recvlen, &requestid)) { pr_err_ratelimited("VSS request received. Could not read into recv buf\n"); return; } @@ -375,7 +378,7 @@ hv_vss_init(struct hv_util_service *srv) } recv_buffer = srv->recv_buffer; vss_transaction.recv_channel = srv->channel; - vss_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2; + vss_transaction.recv_channel->max_pkt_size = VSS_MAX_PKT_SIZE; /* * When this driver loads, the user level daemon that diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index daf82a230c0e..aaa502a7bff4 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -90,6 +90,17 @@ struct hv_vss_check_dm_info { __u32 flags; } __attribute__((packed)); +/* + * struct hv_vss_msg encodes the fields that the Linux VSS + * driver accesses. However, FREEZE messages from Hyper-V contain + * additional LUN information that Linux doesn't use and are not + * represented in struct hv_vss_msg. A received FREEZE message may + * be as large as 6,260 bytes, so the driver must allocate at least + * that much space, not sizeof(struct hv_vss_msg). Other messages + * such as AUTO_RECOVER may be as large as 12,500 bytes. However, + * because the Linux VSS driver responds that it doesn't support + * auto-recovery, it should not receive such messages. 
+ */ struct hv_vss_msg { union { struct hv_vss_hdr vss_hdr; -- cgit v1.2.3-71-gd317 From 47f0bd5032106469827cf56c8b45bb9101112105 Mon Sep 17 00:00:00 2001 From: Jacques de Laval Date: Thu, 17 Feb 2022 16:02:02 +0100 Subject: net: Add new protocol attribute to IP addresses This patch adds a new protocol attribute to IPv4 and IPv6 addresses. Inspiration was taken from the protocol attribute of routes. User space applications like iproute2 can set/get the protocol with the Netlink API. The attribute is stored as an 8-bit unsigned integer. The protocol attribute is set by kernel for these categories: - IPv4 and IPv6 loopback addresses - IPv6 addresses generated from router announcements - IPv6 link local addresses User space may pass custom protocols, not defined by the kernel. Grouping addresses on their origin is useful in scenarios where you want to distinguish between addresses based on who added them, e.g. kernel vs. user space. Tagging addresses with a string label is an existing feature that could be used as a solution. Unfortunately the max length of a label is 15 characters, and for compatibility reasons the label must be prefixed with the name of the device followed by a colon. Since device names also have a max length of 15 characters, only -1 characters is guaranteed to be available for any origin tag, which is not that much. A reference implementation of user space setting and getting protocols is available for iproute2: https://github.com/westermo/iproute2/commit/9a6ea18bd79f47f293e5edc7780f315ea42ff540 Signed-off-by: Jacques de Laval Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20220217150202.80802-1-Jacques.De.Laval@westermo.com Signed-off-by: Jakub Kicinski --- include/linux/inetdevice.h | 1 + include/net/addrconf.h | 2 ++ include/net/if_inet6.h | 2 ++ include/uapi/linux/if_addr.h | 9 ++++++++- net/ipv4/devinet.c | 7 +++++++ net/ipv6/addrconf.c | 27 +++++++++++++++++++++------ 6 files changed, 41 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 674aeead6260..ead323243e7b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -150,6 +150,7 @@ struct in_ifaddr { __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; + unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[IFNAMSIZ]; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 59940e230b78..f7506f08e505 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -64,6 +64,8 @@ struct ifa6_config { const struct in6_addr *pfx; unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; u32 rt_priority; diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index f026cf08a8e8..4cfdef6ca4f6 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -71,6 +71,8 @@ struct inet6_ifaddr { bool tokenized; + u8 ifa_proto; + struct rcu_head rcu; struct in6_addr peer_addr; }; diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index dfcf3ce0097f..1c392dd95a5e 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -33,8 +33,9 @@ enum { IFA_CACHEINFO, IFA_MULTICAST, IFA_FLAGS, - IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ + IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ IFA_TARGET_NETNSID, + IFA_PROTO, /* u8, address protocol */ __IFA_MAX, }; @@ -69,4 +70,10 @@ struct ifa_cacheinfo { #define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg)) #endif +/* ifa_proto 
*/ +#define IFAPROT_UNSPEC 0 +#define IFAPROT_KERNEL_LO 1 /* loopback */ +#define IFAPROT_KERNEL_RA 2 /* set by kernel from router announcement */ +#define IFAPROT_KERNEL_LL 3 /* link-local set by kernel */ + #endif diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index fba2bffd65f7..53a6b14dc50a 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -104,6 +104,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = { [IFA_FLAGS] = { .type = NLA_U32 }, [IFA_RT_PRIORITY] = { .type = NLA_U32 }, [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, + [IFA_PROTO] = { .type = NLA_U8 }, }; struct inet_fill_args { @@ -889,6 +890,9 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh, if (tb[IFA_RT_PRIORITY]) ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); + if (tb[IFA_PROTO]) + ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]); + if (tb[IFA_CACHEINFO]) { struct ifa_cacheinfo *ci; @@ -1625,6 +1629,7 @@ static size_t inet_nlmsg_size(void) + nla_total_size(4) /* IFA_BROADCAST */ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ + nla_total_size(4) /* IFA_FLAGS */ + + nla_total_size(1) /* IFA_PROTO */ + nla_total_size(4) /* IFA_RT_PRIORITY */ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */ } @@ -1699,6 +1704,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) || (ifa->ifa_label[0] && nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) || + (ifa->ifa_proto && + nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) || nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) || (ifa->ifa_rt_priority && nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) || diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2f307da17f21..85bab3a35709 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1115,6 +1115,7 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, ifa->prefix_len = cfg->plen; ifa->rt_priority = cfg->rt_priority; ifa->flags = cfg->ifa_flags; + ifa->ifa_proto = cfg->ifa_proto; /* No need to add the TENTATIVE flag for addresses with NODAD */ if (!(cfg->ifa_flags & IFA_F_NODAD)) ifa->flags |= IFA_F_TENTATIVE; @@ -2593,6 +2594,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, .valid_lft = valid_lft, .preferred_lft = prefered_lft, .scope = addr_type & IPV6_ADDR_SCOPE_MASK, + .ifa_proto = IFAPROT_KERNEL_RA }; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD @@ -3077,7 +3079,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg) } static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, - int plen, int scope) + int plen, int scope, u8 proto) { struct inet6_ifaddr *ifp; struct ifa6_config cfg = { @@ -3086,7 +3088,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, .ifa_flags = IFA_F_PERMANENT, .valid_lft = INFINITY_LIFE_TIME, .preferred_lft = INFINITY_LIFE_TIME, - .scope = scope + .scope = scope, + .ifa_proto = proto }; ifp = ipv6_add_addr(idev, &cfg, true, NULL); @@ -3131,7 +3134,7 @@ static void add_v4_addrs(struct inet6_dev *idev) } if (addr.s6_addr32[3]) { - add_addr(idev, &addr, plen, scope); + add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, GFP_KERNEL); return; @@ -3154,7 +3157,8 @@ static void add_v4_addrs(struct inet6_dev *idev) flag |= IFA_HOST; } - add_addr(idev, &addr, plen, flag); + add_addr(idev, &addr, plen, flag, + IFAPROT_UNSPEC); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, GFP_KERNEL); } @@ -3177,7 +3181,7 
@@ static void init_loopback(struct net_device *dev) return; } - add_addr(idev, &in6addr_loopback, 128, IFA_HOST); + add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO); } void addrconf_add_linklocal(struct inet6_dev *idev, @@ -3189,7 +3193,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev, .ifa_flags = flags | IFA_F_PERMANENT, .valid_lft = INFINITY_LIFE_TIME, .preferred_lft = INFINITY_LIFE_TIME, - .scope = IFA_LINK + .scope = IFA_LINK, + .ifa_proto = IFAPROT_KERNEL_LL }; struct inet6_ifaddr *ifp; @@ -4627,6 +4632,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = { [IFA_FLAGS] = { .len = sizeof(u32) }, [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, + [IFA_PROTO] = { .type = NLA_U8 }, }; static int @@ -4752,6 +4758,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, ifp->tstamp = jiffies; ifp->valid_lft = cfg->valid_lft; ifp->prefered_lft = cfg->preferred_lft; + ifp->ifa_proto = cfg->ifa_proto; if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority) ifp->rt_priority = cfg->rt_priority; @@ -4845,6 +4852,9 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[IFA_RT_PRIORITY]) cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); + if (tb[IFA_PROTO]) + cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]); + cfg.valid_lft = INFINITY_LIFE_TIME; cfg.preferred_lft = INFINITY_LIFE_TIME; @@ -4948,6 +4958,7 @@ static inline int inet6_ifaddr_msgsize(void) + nla_total_size(16) /* IFA_ADDRESS */ + nla_total_size(sizeof(struct ifa_cacheinfo)) + nla_total_size(4) /* IFA_FLAGS */ + + nla_total_size(1) /* IFA_PROTO */ + nla_total_size(4) /* IFA_RT_PRIORITY */; } @@ -5025,6 +5036,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0) goto error; + if (ifa->ifa_proto && + nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) + goto error; + nlmsg_end(skb, nlh); return 0; -- cgit v1.2.3-71-gd317 From 4b340a5a726dafba15b366c4009aa0a8f77631ac Mon Sep 17 00:00:00 2001 From: Mobashshera Rasool Date: Thu, 17 Feb 2022 07:46:40 +0000 Subject: net: ip6mr: add support for passing full packet on wrong mif This patch adds support for MRT6MSG_WRMIFWHOLE, which is used to pass the full packet and the real vif id when the incoming interface is wrong. While the RP and FHR are setting up state we need to be sending the registers encapsulated with all the data inside, otherwise we lose it. The RP then decapsulates it and forwards it to the interested parties. Currently with WRONGMIF we can only send empty register packets and will lose that data. This behaviour can be enabled by using MRT_PIM with val == MRT6MSG_WRMIFWHOLE. This doesn't prevent MRT6MSG_WRONGMIF from happening; it happens in addition to it, and is controlled by the same throttling parameters as WRONGMIF (i.e. 1 packet per 3 seconds currently). Both messages are generated to keep backwards compatibility and avoid breaking someone who was enabling MRT_PIM with val == 4, since any positive val is accepted and treated the same. Signed-off-by: Mobashshera Rasool Signed-off-by: David S.
Miller --- include/uapi/linux/mroute6.h | 1 + net/ipv6/ip6mr.c | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index a1fd6173e2db..1d90c21a6251 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -134,6 +134,7 @@ struct mrt6msg { #define MRT6MSG_NOCACHE 1 #define MRT6MSG_WRONGMIF 2 #define MRT6MSG_WHOLEPKT 3 /* used for use level encap */ +#define MRT6MSG_WRMIFWHOLE 4 /* For PIM Register and assert processing */ __u8 im6_mbz; /* must be zero */ __u8 im6_msgtype; /* what type of message */ __u16 im6_mif; /* mif rec'd on */ diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0ebaaec3faf9..a9775c830194 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1040,7 +1040,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, int ret; #ifdef CONFIG_IPV6_PIMSM_V2 - if (assert == MRT6MSG_WHOLEPKT) + if (assert == MRT6MSG_WHOLEPKT || assert == MRT6MSG_WRMIFWHOLE) skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt) +sizeof(*msg)); else @@ -1056,7 +1056,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, skb->ip_summed = CHECKSUM_UNNECESSARY; #ifdef CONFIG_IPV6_PIMSM_V2 - if (assert == MRT6MSG_WHOLEPKT) { + if (assert == MRT6MSG_WHOLEPKT || assert == MRT6MSG_WRMIFWHOLE) { /* Ugly, but we have no choice with this interface. Duplicate old header, fix length etc. And all this only to mangle msg->im6_msgtype and @@ -1068,8 +1068,11 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, skb_reset_transport_header(skb); msg = (struct mrt6msg *)skb_transport_header(skb); msg->im6_mbz = 0; - msg->im6_msgtype = MRT6MSG_WHOLEPKT; - msg->im6_mif = mrt->mroute_reg_vif_num; + msg->im6_msgtype = assert; + if (assert == MRT6MSG_WRMIFWHOLE) + msg->im6_mif = mifi; + else + msg->im6_mif = mrt->mroute_reg_vif_num; msg->im6_pad = 0; msg->im6_src = ipv6_hdr(pkt)->saddr; msg->im6_dst = ipv6_hdr(pkt)->daddr; @@ -1650,6 +1653,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, mifi_t mifi; struct net *net = sock_net(sk); struct mr_table *mrt; + bool do_wrmifwhole; if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_ICMPV6) @@ -1763,12 +1767,15 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, return -EINVAL; if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; + + do_wrmifwhole = (v == MRT6MSG_WRMIFWHOLE); v = !!v; rtnl_lock(); ret = 0; if (v != mrt->mroute_do_pim) { mrt->mroute_do_pim = v; mrt->mroute_do_assert = v; + mrt->mroute_do_wrvifwhole = do_wrmifwhole; } rtnl_unlock(); return ret; @@ -2144,6 +2151,9 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt, MFC_ASSERT_THRESH)) { c->_c.mfc_un.res.last_assert = jiffies; ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF); + if (mrt->mroute_do_wrvifwhole) + ip6mr_cache_report(mrt, skb, true_vifi, + MRT6MSG_WRMIFWHOLE); } goto dont_forward; } -- cgit v1.2.3-71-gd317 From a1dc6308865df719efb2a2f8a5f0f5979602d267 Mon Sep 17 00:00:00 2001 From: Amitay Isaacs Date: Fri, 21 Jan 2022 16:08:16 +1030 Subject: fsi: sbefifo: Implement FSI_SBEFIFO_READ_TIMEOUT_SECONDS ioctl The FSI_SBEFIFO_READ_TIMEOUT_SECONDS ioctl sets the read timeout (in seconds) for the response received by the sbefifo device from the SBE. The timeout affects only the read operation on the current sbefifo device fd.
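A hedged userspace usage sketch (the wrapper is mine; the ioctl itself and its 10-120 second bounds are defined in the diff below):

	#include <sys/ioctl.h>
	#include <linux/fsi.h>

	/* Pass the timeout in seconds by pointer; 0 restores the default. */
	static int sbefifo_set_read_timeout(int fd, __u32 seconds)
	{
		return ioctl(fd, FSI_SBEFIFO_READ_TIMEOUT_SECONDS, &seconds);
	}
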
Certain SBE operations can take a long time to complete and the default timeout of 10 seconds might not be sufficient to start receiving a response from the SBE. In such cases, allow the timeout to be set to a maximum of 120 seconds. The kernel does not contain the definition of the various SBE operations, so we must expose an interface to userspace to set the timeout for the given operation. Signed-off-by: Amitay Isaacs Signed-off-by: Joel Stanley Reviewed-by: Eddie James Link: https://lore.kernel.org/r/20220121053816.82253-3-joel@jms.id.au Signed-off-by: Joel Stanley --- drivers/fsi/fsi-sbefifo.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/fsi.h | 14 ++++++++++++++ 2 files changed, 63 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c index 1e9b326e8f67..f52a912cdf16 100644 --- a/drivers/fsi/fsi-sbefifo.c +++ b/drivers/fsi/fsi-sbefifo.c @@ -32,6 +32,8 @@ #include #include +#include + /* * The SBEFIFO is a pipe-like FSI device for communicating with * the self boot engine on POWER processors. @@ -134,6 +136,7 @@ struct sbefifo_user { void *cmd_page; void *pending_cmd; size_t pending_len; + u32 read_timeout_ms; }; static DEFINE_MUTEX(sbefifo_ffdc_mutex); @@ -796,6 +799,7 @@ static int sbefifo_user_open(struct inode *inode, struct file *file) return -ENOMEM; } mutex_init(&user->file_lock); + user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP; return 0; } @@ -838,7 +842,9 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf, rc = mutex_lock_interruptible(&sbefifo->lock); if (rc) goto bail; + sbefifo->timeout_start_rsp_ms = user->read_timeout_ms; rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter); + sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP; mutex_unlock(&sbefifo->lock); if (rc < 0) goto bail; @@ -928,12 +934,55 @@ static int sbefifo_user_release(struct inode *inode, struct file *file) return 0; } +static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp) +{ + struct device *dev = &user->sbefifo->dev; + u32 timeout; + + if (get_user(timeout, (__u32 __user *)argp)) + return -EFAULT; + + if (timeout == 0) { + user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP; + dev_dbg(dev, "Timeout reset to %d\n", user->read_timeout_ms); + return 0; + } + + if (timeout < 10 || timeout > 120) + return -EINVAL; + + user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */ + + dev_dbg(dev, "Timeout set to %d\n", user->read_timeout_ms); + + return 0; +} + +static long sbefifo_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sbefifo_user *user = file->private_data; + int rc = -ENOTTY; + + if (!user) + return -EINVAL; + + mutex_lock(&user->file_lock); + switch (cmd) { + case FSI_SBEFIFO_READ_TIMEOUT_SECONDS: + rc = sbefifo_read_timeout(user, (void __user *)arg); + break; + } + mutex_unlock(&user->file_lock); + return rc; +} + static const struct file_operations sbefifo_fops = { .owner = THIS_MODULE, .open = sbefifo_user_open, .read = sbefifo_user_read, .write = sbefifo_user_write, .release = sbefifo_user_release, + .unlocked_ioctl = sbefifo_user_ioctl, }; static void sbefifo_free(struct device *dev) diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h index da577ecd90e7..b2f1977378c7 100644 --- a/include/uapi/linux/fsi.h +++ b/include/uapi/linux/fsi.h @@ -55,4 +55,18 @@ struct scom_access { #define FSI_SCOM_WRITE _IOWR('s', 0x02, struct scom_access) #define FSI_SCOM_RESET _IOW('s', 0x03, __u32) +/* + *
/dev/sbefifo* ioctl interface + */ + +/** + * FSI_SBEFIFO_READ_TIMEOUT_SECONDS sets the read timeout for the response from the SBE. + * + * The read timeout is specified in seconds. The minimum value of the read + * timeout is 10 seconds (the default) and the maximum value of the read timeout is + * 120 seconds. A read timeout of 0 will reset the value to the default + * (10 seconds). + */ +#define FSI_SBEFIFO_READ_TIMEOUT_SECONDS _IOW('s', 0x00, __u32) + #endif /* _UAPI_LINUX_FSI_H */ -- cgit v1.2.3-71-gd317 From 129e3c1bab24d27d0fa6e505a472345a92d7a2b0 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Mon, 21 Feb 2022 13:54:57 +0800 Subject: bonding: add new option ns_ip6_target This patch adds a new bonding option, ns_ip6_target, which corresponds to arp_ip_target. With this we can set IPv6 targets and send IPv6 NS requests to determine the health of the link. For other related options, like validation, we still use arp_validate, and will change to ns_validate later. Note: the sysfs configuration support was removed based on https://lore.kernel.org/netdev/8863.1645071997@famine Signed-off-by: Hangbin Liu Signed-off-by: David S. Miller --- Documentation/networking/bonding.rst | 11 ++++++ drivers/net/bonding/bond_netlink.c | 59 +++++++++++++++++++++++++++++ drivers/net/bonding/bond_options.c | 72 ++++++++++++++++++++++++++++++++++++ include/net/bond_options.h | 4 ++ include/net/bonding.h | 7 ++++ include/uapi/linux/if_link.h | 1 + tools/include/uapi/linux/if_link.h | 1 + 7 files changed, 155 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst index ab98373535ea..525e6842dd33 100644 --- a/Documentation/networking/bonding.rst +++ b/Documentation/networking/bonding.rst @@ -313,6 +313,17 @@ arp_ip_target maximum number of targets that can be specified is 16. The default value is no IP addresses. +ns_ip6_target + + Specifies the IPv6 addresses to use as IPv6 monitoring peers when + arp_interval is > 0. These are the targets of the NS request + sent to determine the health of the link to the targets. + Specify these values in ffff:ffff::ffff:ffff format. Multiple IPv6 + addresses must be separated by a comma. At least one IPv6 + address must be given for NS/NA monitoring to function. The + maximum number of targets that can be specified is 16. The + default value is no IPv6 addresses. 
+ arp_validate Specifies whether or not ARP probes and replies should be diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 1007bf6d385d..f427fa1737c7 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -14,6 +14,7 @@ #include #include #include +#include static size_t bond_get_slave_size(const struct net_device *bond_dev, const struct net_device *slave_dev) @@ -111,6 +112,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 }, [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 }, [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 }, + [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED }, }; static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -272,6 +274,40 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], if (err) return err; } +#if IS_ENABLED(CONFIG_IPV6) + if (data[IFLA_BOND_NS_IP6_TARGET]) { + struct nlattr *attr; + int i = 0, rem; + + bond_option_ns_ip6_targets_clear(bond); + nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) { + struct in6_addr addr6; + + if (nla_len(attr) < sizeof(addr6)) { + NL_SET_ERR_MSG(extack, "Invalid IPv6 address"); + return -EINVAL; + } + + addr6 = nla_get_in6_addr(attr); + + if (ipv6_addr_type(&addr6) & IPV6_ADDR_LINKLOCAL) { + NL_SET_ERR_MSG(extack, "Invalid IPv6 addr6"); + return -EINVAL; + } + + bond_opt_initextra(&newval, &addr6, sizeof(addr6)); + err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS, + &newval); + if (err) + break; + i++; + } + if (i == 0 && bond->params.arp_interval) + netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n"); + if (err) + return err; + } +#endif if (data[IFLA_BOND_ARP_VALIDATE]) { int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]); @@ -526,6 +562,9 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */ nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */ nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */ + /* IFLA_BOND_NS_IP6_TARGET */ + nla_total_size(sizeof(struct nlattr)) + + nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS + 0; } @@ -603,6 +642,26 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.arp_all_targets)) goto nla_put_failure; +#if IS_ENABLED(CONFIG_IPV6) + targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET); + if (!targets) + goto nla_put_failure; + + targets_added = 0; + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) { + if (!ipv6_addr_any(&bond->params.ns_targets[i])) { + if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i])) + goto nla_put_failure; + targets_added = 1; + } + } + + if (targets_added) + nla_nest_end(skb, targets); + else + nla_nest_cancel(skb, targets); +#endif + primary = rtnl_dereference(bond->primary_slave); if (primary && nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex)) diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index ab575135b626..64f7db2627ce 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -34,6 +34,10 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target); static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target); static int bond_option_arp_ip_targets_set(struct bonding *bond, const struct bond_opt_value *newval); +#if IS_ENABLED(CONFIG_IPV6) +static int bond_option_ns_ip6_targets_set(struct bonding *bond, + 
const struct bond_opt_value *newval); +#endif static int bond_option_arp_validate_set(struct bonding *bond, const struct bond_opt_value *newval); static int bond_option_arp_all_targets_set(struct bonding *bond, @@ -295,6 +299,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .flags = BOND_OPTFLAG_RAWVAL, .set = bond_option_arp_ip_targets_set }, +#if IS_ENABLED(CONFIG_IPV6) + [BOND_OPT_NS_TARGETS] = { + .id = BOND_OPT_NS_TARGETS, + .name = "ns_ip6_target", + .desc = "NS targets in ffff:ffff::ffff:ffff form", + .flags = BOND_OPTFLAG_RAWVAL, + .set = bond_option_ns_ip6_targets_set + }, +#endif [BOND_OPT_DOWNDELAY] = { .id = BOND_OPT_DOWNDELAY, .name = "downdelay", @@ -1184,6 +1197,65 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond, return ret; } +#if IS_ENABLED(CONFIG_IPV6) +static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, + struct in6_addr *target, + unsigned long last_rx) +{ + struct in6_addr *targets = bond->params.ns_targets; + struct list_head *iter; + struct slave *slave; + + if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { + bond_for_each_slave(bond, slave, iter) + slave->target_last_arp_rx[slot] = last_rx; + targets[slot] = *target; + } +} + +void bond_option_ns_ip6_targets_clear(struct bonding *bond) +{ + struct in6_addr addr_any = in6addr_any; + int i; + + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) + _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0); +} + +static int bond_option_ns_ip6_targets_set(struct bonding *bond, + const struct bond_opt_value *newval) +{ + struct in6_addr *target = (struct in6_addr *)newval->extra; + struct in6_addr *targets = bond->params.ns_targets; + struct in6_addr addr_any = in6addr_any; + int index; + + if (!bond_is_ip6_target_ok(target)) { + netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n", + target); + return -EINVAL; + } + + if (bond_get_targets_ip6(targets, target) != -1) { /* dup */ + netdev_err(bond->dev, "NS target %pI6c is already present\n", + target); + return -EINVAL; + } + + index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */ + if (index == -1) { + netdev_err(bond->dev, "NS target table is full!\n"); + return -EINVAL; + } + + netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target); + + _bond_options_ns_ip6_target_set(bond, index, target, jiffies); + + return 0; +} +#endif + static int bond_option_arp_validate_set(struct bonding *bond, const struct bond_opt_value *newval) { diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 286b29c6c451..61b49063791c 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -66,6 +66,7 @@ enum { BOND_OPT_PEER_NOTIF_DELAY, BOND_OPT_LACP_ACTIVE, BOND_OPT_MISSED_MAX, + BOND_OPT_NS_TARGETS, BOND_OPT_LAST }; @@ -140,5 +141,8 @@ static inline void __bond_opt_init(struct bond_opt_value *optval, __bond_opt_init(optval, NULL, ULLONG_MAX, extra, extra_len) void bond_option_arp_ip_targets_clear(struct bonding *bond); +#if IS_ENABLED(CONFIG_IPV6) +void bond_option_ns_ip6_targets_clear(struct bonding *bond); +#endif #endif /* _NET_BOND_OPTIONS_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h index f3b986f6b6e4..d0dfe727e0b1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -503,6 +503,13 @@ static inline int bond_is_ip_target_ok(__be32 addr) return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr); } +static inline int bond_is_ip6_target_ok(struct in6_addr *addr) +{ + return !ipv6_addr_any(addr) && + !ipv6_addr_loopback(addr) && + 
!ipv6_addr_is_multicast(addr); +} + /* Get the oldest arp which we've received on this slave for bond's * arp_targets. */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6218f93f5c1a..e1ba2d51b717 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -860,6 +860,7 @@ enum { IFLA_BOND_PEER_NOTIF_DELAY, IFLA_BOND_AD_LACP_ACTIVE, IFLA_BOND_MISSED_MAX, + IFLA_BOND_NS_IP6_TARGET, __IFLA_BOND_MAX, }; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 6218f93f5c1a..e1ba2d51b717 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -860,6 +860,7 @@ enum { IFLA_BOND_PEER_NOTIF_DELAY, IFLA_BOND_AD_LACP_ACTIVE, IFLA_BOND_MISSED_MAX, + IFLA_BOND_NS_IP6_TARGET, __IFLA_BOND_MAX, }; -- cgit v1.2.3-71-gd317 From c086df4902573e2f06c6a2a83452c13a8bc603f5 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 10 Jan 2022 18:52:52 -0500 Subject: fuse: move FUSE_SUPER_MAGIC definition to magic.h ...to help userland apps that need to identify FUSE mounts. Signed-off-by: Jeff Layton Signed-off-by: Miklos Szeredi --- fs/fuse/inode.c | 3 +-- include/uapi/linux/magic.h | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index ee846ce371d8..9ee36aa73251 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -23,6 +23,7 @@ #include #include #include +#include MODULE_AUTHOR("Miklos Szeredi "); MODULE_DESCRIPTION("Filesystem in Userspace"); @@ -50,8 +51,6 @@ MODULE_PARM_DESC(max_user_congthresh, "Global limit for the maximum congestion threshold an " "unprivileged user can set"); -#define FUSE_SUPER_MAGIC 0x65735546 - #define FUSE_DEFAULT_BLKSIZE 512 /** Maximum number of outstanding background requests */ diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 0425cd79af9a..f724129c0425 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -36,6 +36,7 @@ #define EFIVARFS_MAGIC 0xde5e81e4 #define HOSTFS_SUPER_MAGIC 0x00c0ffee #define OVERLAYFS_SUPER_MAGIC 0x794c7630 +#define FUSE_SUPER_MAGIC 0x65735546 #define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */ #define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */ -- cgit v1.2.3-71-gd317 From d43583b890e7cb0078d13d056753a56602b92406 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 21 Feb 2022 15:35:23 +0000 Subject: KVM: arm64: Expose PSCI SYSTEM_RESET2 call to the guest PSCI v1.1 introduces the optional SYSTEM_RESET2 call, which allows the caller to provide a vendor-specific "reset type" and "cookie" to request a particular form of reset or shutdown. Expose this call to the guest and handle it in the same way as PSCI SYSTEM_RESET, along with some basic range checking on the type argument. 
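As an illustrative sketch only (hypothetical guest-side code, not part of this patch), a guest could probe for and issue SYSTEM_RESET2 along these lines, using the SMCCC helpers:

    #include <linux/arm-smccc.h>
    #include <uapi/linux/psci.h>

    /* Probe SYSTEM_RESET2 via PSCI_FEATURES, then request an
     * architectural warm reset (reset_type 0, cookie 0). */
    static void guest_try_system_reset2(void)
    {
            struct arm_smccc_res res;

            arm_smccc_1_1_invoke(PSCI_1_0_FN_PSCI_FEATURES,
                                 PSCI_1_1_FN64_SYSTEM_RESET2, &res);
            if ((long)res.a0 < 0)
                    return; /* not supported; fall back to SYSTEM_RESET */

            arm_smccc_1_1_invoke(PSCI_1_1_FN64_SYSTEM_RESET2,
                                 PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET,
                                 0 /* cookie */, &res);
    }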
Cc: Marc Zyngier Cc: James Morse Cc: Alexandru Elisei Cc: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220221153524.15397-3-will@kernel.org --- arch/arm64/kvm/psci.c | 33 +++++++++++++++++++++++++++++---- include/uapi/linux/psci.h | 4 ++++ 2 files changed, 33 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 70d07477e712..30fcc5a99483 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -308,7 +308,7 @@ out: static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) { u32 psci_fn = smccc_get_function(vcpu); - u32 feature; + u32 arg; unsigned long val; int ret = 1; @@ -320,12 +320,12 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1; break; case PSCI_1_0_FN_PSCI_FEATURES: - feature = smccc_get_arg1(vcpu); - val = kvm_psci_check_allowed_function(vcpu, feature); + arg = smccc_get_arg1(vcpu); + val = kvm_psci_check_allowed_function(vcpu, arg); if (val) break; - switch(feature) { + switch(arg) { case PSCI_0_2_FN_PSCI_VERSION: case PSCI_0_2_FN_CPU_SUSPEND: case PSCI_0_2_FN64_CPU_SUSPEND: @@ -341,11 +341,36 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) case ARM_SMCCC_VERSION_FUNC_ID: val = 0; break; + case PSCI_1_1_FN_SYSTEM_RESET2: + case PSCI_1_1_FN64_SYSTEM_RESET2: + if (minor >= 1) { + val = 0; + break; + } + fallthrough; default: val = PSCI_RET_NOT_SUPPORTED; break; } break; + case PSCI_1_1_FN_SYSTEM_RESET2: + kvm_psci_narrow_to_32bit(vcpu); + fallthrough; + case PSCI_1_1_FN64_SYSTEM_RESET2: + if (minor >= 1) { + arg = smccc_get_arg1(vcpu); + + if (arg > PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET && + arg < PSCI_1_1_RESET_TYPE_VENDOR_START) { + val = PSCI_RET_INVALID_PARAMS; + } else { + kvm_psci_system_reset(vcpu); + val = PSCI_RET_INTERNAL_FAILURE; + ret = 0; + } + break; + }; + fallthrough; default: return kvm_psci_0_2_call(vcpu); } diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 2fcad1dd0b0e..2bf93c0d6354 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -82,6 +82,10 @@ #define PSCI_0_2_TOS_UP_NO_MIGRATE 1 #define PSCI_0_2_TOS_MP 2 +/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */ +#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 +#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ -- cgit v1.2.3-71-gd317 From a603ca60cebff8589882427a67f870ed946b3fc8 Mon Sep 17 00:00:00 2001 From: Zev Weiss Date: Thu, 10 Feb 2022 16:42:03 -0800 Subject: serial: 8250_aspeed_vuart: add PORT_ASPEED_VUART port type Commit 54da3e381c2b ("serial: 8250_aspeed_vuart: use UPF_IOREMAP to set up register mapping") fixed a bug that had, as a side-effect, prevented the 8250_aspeed_vuart driver from enabling the VUART's FIFOs. However, fixing that (and hence enabling the FIFOs) has in turn revealed what appears to be a hardware bug in the ASPEED VUART in which the host-side THRE bit doesn't get set if the BMC-side receive FIFO trigger level is set to anything but one byte. This causes problems for polled-mode writes from the host -- for example, Linux kernel console writes proceed at a glacial pace (less than 100 bytes per second) because the write path waits for a 10ms timeout to expire after every character instead of being able to continue on to the next character upon seeing THRE asserted. 
(GRUB behaves similarly.) As a workaround, introduce a new port type for the ASPEED VUART that's identical to the PORT_16550A type it had previously been using, but with UART_FCR_R_TRIG_00 to set the receive FIFO trigger level to one byte, which (experimentally) seems to avoid the problematic THRE behavior. Fixes: 54da3e381c2b ("serial: 8250_aspeed_vuart: use UPF_IOREMAP to set up register mapping") Tested-by: Konstantin Aladyshev Reviewed-by: Andy Shevchenko Signed-off-by: Zev Weiss Link: https://lore.kernel.org/r/20220211004203.14915-1-zev@bewilderbeest.net Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_aspeed_vuart.c | 2 +- drivers/tty/serial/8250/8250_port.c | 8 ++++++++ include/uapi/linux/serial_core.h | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index 2350fb3bb5e4..c2cecc6f47db 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -487,7 +487,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev) port.port.irq = irq_of_parse_and_map(np, 0); port.port.handle_irq = aspeed_vuart_handle_irq; port.port.iotype = UPIO_MEM; - port.port.type = PORT_16550A; + port.port.type = PORT_ASPEED_VUART; port.port.uartclk = clk; port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST; diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 3b12bfc1ed67..973870ebff69 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -307,6 +307,14 @@ static const struct serial8250_config uart_config[] = { .rxtrig_bytes = {1, 32, 64, 112}, .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, + [PORT_ASPEED_VUART] = { + .name = "ASPEED VUART", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index c4042dcfdc0c..8885e69178bd 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -68,6 +68,9 @@ /* NVIDIA Tegra Combined UART */ #define PORT_TEGRA_TCU 41 +/* ASPEED AST2x00 virtual UART */ +#define PORT_ASPEED_VUART 42 + /* Intel EG20 */ #define PORT_PCH_8LINE 44 #define PORT_PCH_2LINE 45 -- cgit v1.2.3-71-gd317 From a1a5cfe70cd29a59a9a85290dfe95ed1c8df1193 Mon Sep 17 00:00:00 2001 From: Cosmin Tanislav Date: Mon, 14 Feb 2022 09:38:06 +0200 Subject: iio: introduce mag_referenced Some accelerometers that support activity and inactivity events also support a referenced mode, in which the gravitational acceleration is taken as a point of reference before comparing the acceleration to the specified activity and inactivity magnitude. For example, in the case of the ADXL367, for activity detection, the formula is: abs(acceleration - reference) > magnitude Add a new event type that makes this behavior clear. 
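For illustration (hypothetical driver-side declaration, not part of this patch), a driver could advertise such an event roughly like this:

    #include <linux/iio/iio.h>

    /* A channel event using the new type: activity detection relative
     * to a gravity reference, with configurable magnitude and enable. */
    static const struct iio_event_spec accel_activity_event = {
            .type = IIO_EV_TYPE_MAG_REFERENCED,
            .dir = IIO_EV_DIR_RISING,
            .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
    };

Wired into a channel's event_spec, this should surface in sysfs as event attributes carrying the new "mag_referenced" type string, e.g. in_accel_mag_referenced_rising_value.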
Signed-off-by: Cosmin Tanislav Link: https://lore.kernel.org/r/20220214073810.781016-2-cosmin.tanislav@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-event.c | 1 + include/uapi/linux/iio/types.h | 1 + tools/iio/iio_event_monitor.c | 1 + 3 files changed, 3 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index d0732eac0f0a..ce8b102ce52f 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -230,6 +230,7 @@ static const char * const iio_ev_type_text[] = { [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", [IIO_EV_TYPE_CHANGE] = "change", + [IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced", }; static const char * const iio_ev_dir_text[] = { diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index 48c13147c0a8..472cead10d8d 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -104,6 +104,7 @@ enum iio_event_type { IIO_EV_TYPE_THRESH_ADAPTIVE, IIO_EV_TYPE_MAG_ADAPTIVE, IIO_EV_TYPE_CHANGE, + IIO_EV_TYPE_MAG_REFERENCED, }; enum iio_event_direction { diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c index b94a16ba5c6c..2f4581658859 100644 --- a/tools/iio/iio_event_monitor.c +++ b/tools/iio/iio_event_monitor.c @@ -68,6 +68,7 @@ static const char * const iio_ev_type_text[] = { [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", [IIO_EV_TYPE_CHANGE] = "change", + [IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced", }; static const char * const iio_ev_dir_text[] = { -- cgit v1.2.3-71-gd317 From 0fbb4d93b38bce1f8235aacfa37e90ad8f011473 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 17 Feb 2022 23:40:32 -0500 Subject: dm: add dm_submit_bio_remap interface Where possible, switch from early bio-based IO accounting (at the time DM clones each incoming bio) to late IO accounting just before each remapped bio is issued to the underlying device via submit_bio_noacct(). Allows more precise bio-based IO accounting for DM targets that use their own workqueues to perform additional processing of each bio in conjunction with their DM_MAPIO_SUBMITTED return from their map function. When a target is updated to use dm_submit_bio_remap(), it must also set ti->accounts_remapped_io to true. Use xchg() in start_io_acct(), as suggested by Mikulas, to ensure each IO is only started once. The xchg race only happens if __send_duplicate_bios() sends multiple bios -- that case is reflected via tio->is_duplicate_bio. Given the niche nature of this race, it is best to avoid any xchg performance penalty for normal IO. For IO that was never submitted with dm_submit_bio_remap(), but the target completes the clone with bio_endio, accounting is started then ended and the pending_io counter is decremented. 
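As a rough sketch of the intended usage (all my_* names are hypothetical; error handling and teardown are omitted):

    struct my_work {
            struct work_struct work;
            struct bio *clone;
    };

    static struct workqueue_struct *my_wq;

    static void my_work_fn(struct work_struct *work)
    {
            struct my_work *w = container_of(work, struct my_work, work);

            /* ...extra per-bio processing... then issue the clone; DM-level
             * IO accounting starts here rather than at clone time */
            dm_submit_bio_remap(w->clone, NULL, true /* from_wq */);
    }

    static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    {
            ti->accounts_remapped_io = true; /* required for dm_submit_bio_remap() */
            ti->per_io_data_size = sizeof(struct my_work);
            return 0;
    }

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
            struct my_work *w = dm_per_bio_data(bio, sizeof(struct my_work));

            w->clone = bio;
            INIT_WORK(&w->work, my_work_fn);
            queue_work(my_wq, &w->work);
            return DM_MAPIO_SUBMITTED; /* the target now owns the bio */
    }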
Reviewed-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-core.h | 2 + drivers/md/dm.c | 127 +++++++++++++++++++++++++++++++++++------- include/linux/device-mapper.h | 7 +++ include/uapi/linux/dm-ioctl.h | 4 +- 4 files changed, 118 insertions(+), 22 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 33ef92e90462..8078b6c155ef 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -232,6 +232,8 @@ struct dm_io { struct mapped_device *md; struct bio *orig_bio; blk_status_t status; + bool start_io_acct:1; + int was_accounted; unsigned long start_time; spinlock_t endio_lock; struct dm_stats_aux stats_aux; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d1c618c3f6c6..082366d0ad49 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -518,14 +518,33 @@ static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio, bio->bi_iter.bi_size = bi_size; } -static void dm_start_io_acct(struct dm_io *io) +static void __dm_start_io_acct(struct dm_io *io, struct bio *bio) { - dm_io_acct(false, io->md, io->orig_bio, io->start_time, &io->stats_aux); + dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux); } -static void dm_end_io_acct(struct dm_io *io) +static void dm_start_io_acct(struct dm_io *io, struct bio *clone) { - dm_io_acct(true, io->md, io->orig_bio, io->start_time, &io->stats_aux); + /* Must account IO to DM device in terms of orig_bio */ + struct bio *bio = io->orig_bio; + + /* + * Ensure IO accounting is only ever started once. + * Expect no possibility for race unless is_duplicate_bio. + */ + if (!clone || likely(!clone_to_tio(clone)->is_duplicate_bio)) { + if (WARN_ON(io->was_accounted)) + return; + io->was_accounted = 1; + } else if (xchg(&io->was_accounted, 1) == 1) + return; + + __dm_start_io_acct(io, bio); +} + +static void dm_end_io_acct(struct dm_io *io, struct bio *bio) +{ + dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux); } static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) @@ -545,11 +564,13 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) io->status = 0; atomic_set(&io->io_count, 1); this_cpu_inc(*md->pending_io); - io->orig_bio = bio; + io->orig_bio = NULL; io->md = md; spin_lock_init(&io->endio_lock); io->start_time = jiffies; + io->start_io_acct = false; + io->was_accounted = 0; dm_stats_record_start(&md->stats, &io->stats_aux); @@ -849,7 +870,16 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error) } io_error = io->status; - dm_end_io_acct(io); + if (io->was_accounted) + dm_end_io_acct(io, bio); + else if (!io_error) { + /* + * Must handle target that DM_MAPIO_SUBMITTED only to + * then bio_endio() rather than dm_submit_bio_remap() + */ + __dm_start_io_acct(io, bio); + dm_end_io_acct(io, bio); + } free_io(io); smp_wmb(); this_cpu_dec(*md->pending_io); @@ -1131,6 +1161,56 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); +static inline void __dm_submit_bio_remap(struct bio *clone, + dev_t dev, sector_t old_sector) +{ + trace_block_bio_remap(clone, dev, old_sector); + submit_bio_noacct(clone); +} + +/* + * @clone: clone bio that DM core passed to target's .map function + * @tgt_clone: clone of @clone bio that target needs submitted + * @from_wq: caller is a workqueue thread managed by DM target + * + * Targets should use this interface to submit bios they take + * ownership of when returning DM_MAPIO_SUBMITTED. 
+ * + * Target should also enable ti->accounts_remapped_io + */ +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, + bool from_wq) +{ + struct dm_target_io *tio = clone_to_tio(clone); + struct dm_io *io = tio->io; + + /* establish bio that will get submitted */ + if (!tgt_clone) + tgt_clone = clone; + + /* + * Account io->orig_bio to DM dev on behalf of target + * that took ownership of IO with DM_MAPIO_SUBMITTED. + */ + if (!from_wq) { + /* Still in target's map function */ + io->start_io_acct = true; + } else { + /* + * Called by another thread, managed by DM target, + * wait for dm_split_and_process_bio() to store + * io->orig_bio + */ + while (unlikely(!smp_load_acquire(&io->orig_bio))) + msleep(1); + dm_start_io_acct(io, clone); + } + + __dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk), + tio->old_sector); +} +EXPORT_SYMBOL_GPL(dm_submit_bio_remap); + static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) { mutex_lock(&md->swap_bios_lock); @@ -1157,9 +1237,7 @@ static void __map_bio(struct bio *clone) clone->bi_end_io = clone_endio; /* - * Map the clone. If r == 0 we don't need to do - * anything, the target has assumed ownership of - * this io. + * Map the clone. */ dm_io_inc_pending(io); tio->old_sector = clone->bi_iter.bi_sector; @@ -1184,12 +1262,18 @@ static void __map_bio(struct bio *clone) switch (r) { case DM_MAPIO_SUBMITTED: + /* target has assumed ownership of this io */ + if (!ti->accounts_remapped_io) + io->start_io_acct = true; break; case DM_MAPIO_REMAPPED: - /* the bio has been remapped so dispatch it */ - trace_block_bio_remap(clone, bio_dev(io->orig_bio), + /* + * the bio has been remapped so dispatch it, but defer + * dm_start_io_acct() until after possible bio_split(). + */ + __dm_submit_bio_remap(clone, disk_devt(io->md->disk), tio->old_sector); - submit_bio_noacct(clone); + io->start_io_acct = true; break; case DM_MAPIO_KILL: case DM_MAPIO_REQUEUE: @@ -1404,7 +1488,7 @@ static void dm_split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { struct clone_info ci; - struct bio *b; + struct bio *orig_bio = NULL; int error = 0; init_clone_info(&ci, md, map, bio); @@ -1426,15 +1510,18 @@ static void dm_split_and_process_bio(struct mapped_device *md, * used by dm_end_io_acct() and for dm_io_dec_pending() to use for * completion handling. */ - b = bio_split(bio, bio_sectors(bio) - ci.sector_count, - GFP_NOIO, &md->queue->bio_split); - ci.io->orig_bio = b; - - bio_chain(b, bio); - trace_block_split(b, bio->bi_iter.bi_sector); + orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count, + GFP_NOIO, &md->queue->bio_split); + bio_chain(orig_bio, bio); + trace_block_split(orig_bio, bio->bi_iter.bi_sector); submit_bio_noacct(bio); out: + if (!orig_bio) + orig_bio = bio; + smp_store_release(&ci.io->orig_bio, orig_bio); + if (ci.io->start_io_acct) + dm_start_io_acct(ci.io, NULL); + /* drop the extra reference count */ dm_io_dec_pending(ci.io, errno_to_blk_status(error)); } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index b26fecf6c8e8..7752d14d13f8 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -362,6 +362,12 @@ struct dm_target { * zone append operations using regular writes. */ bool emulate_zone_append:1; + + /* + * Set if the target will submit IO using dm_submit_bio_remap() + * after returning DM_MAPIO_SUBMITTED from its map function. 
+ */ + bool accounts_remapped_io:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -465,6 +471,7 @@ int dm_suspended(struct dm_target *ti); int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone, bool from_wq); union map_info *dm_get_rq_mapinfo(struct request *rq); #ifdef CONFIG_BLK_DEV_ZONED diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index c12ce30b52df..2e9550fef90f 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -286,9 +286,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 45 +#define DM_VERSION_MINOR 46 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2021-03-22)" +#define DM_VERSION_EXTRA "-ioctl (2022-02-22)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ -- cgit v1.2.3-71-gd317 From 93b71801a8274cd9511557faf04365a5de487197 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 22 Feb 2022 09:06:54 -0500 Subject: KVM: PPC: reserve capability 210 for KVM_CAP_PPC_AIL_MODE_3 Add KVM_CAP_PPC_AIL_MODE_3 to advertise the capability to set the AIL resource mode to 3 with the H_SET_MODE hypercall. This capability differs between processor types and KVM types (PR, HV, Nested HV), and affects guest-visible behaviour. QEMU will implement a cap-ail-mode-3 to control this behaviour[1], and use the KVM CAP if available to determine KVM support[2]. Reviewed-by: Fabiano Rosas Signed-off-by: Nicholas Piggin Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 14 ++++++++++++++ include/uapi/linux/kvm.h | 1 + tools/include/uapi/linux/kvm.h | 1 + 3 files changed, 16 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a4267104db50..9954568c7eab 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6997,6 +6997,20 @@ indicated by the fd to the VM this is called on. This is intended to support intra-host migration of VMs between userspace VMMs, upgrading the VMM process without interrupting the guest. +7.30 KVM_CAP_PPC_AIL_MODE_3 +------------------------------- + +:Capability: KVM_CAP_PPC_AIL_MODE_3 +:Architectures: ppc +:Type: vm + +This capability indicates that the kernel supports the mode 3 setting for the +"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location" +resource that is controlled with the H_SET_MODE hypercall. + +This capability allows a guest kernel to use a better-performance mode for +handling interrupts and system calls. + 8. Other capabilities. 
====================== diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5191b57e1562..507ee1f2aa96 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_GPA_BITS 207 #define KVM_CAP_XSAVE2 208 #define KVM_CAP_SYS_ATTRIBUTES 209 +#define KVM_CAP_PPC_AIL_MODE_3 210 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 5191b57e1562..507ee1f2aa96 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_GPA_BITS 207 #define KVM_CAP_XSAVE2 208 #define KVM_CAP_SYS_ATTRIBUTES 209 +#define KVM_CAP_PPC_AIL_MODE_3 210 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-71-gd317 From a21d9a670d81103db7f788de1a4a4a6e4b891a0b Mon Sep 17 00:00:00 2001 From: Hans Schultz Date: Wed, 23 Feb 2022 11:16:46 +0100 Subject: net: bridge: Add support for bridge port in locked mode In an 802.1X scenario, clients connected to a bridge port shall not be allowed to have traffic forwarded until fully authenticated. A static fdb entry of the client's MAC address for the bridge port unlocks the client and allows bidirectional communication. This scenario is facilitated by setting the bridge port in locked mode, which is also supported by various switchcore chipsets. Signed-off-by: Hans Schultz Acked-by: Nikolay Aleksandrov Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 1 + include/uapi/linux/if_link.h | 1 + net/bridge/br_input.c | 11 ++++++++++- net/bridge/br_netlink.c | 6 +++++- 4 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 509e18c7e740..3aae023a9353 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -58,6 +58,7 @@ struct br_ip_list { #define BR_MRP_LOST_CONT BIT(18) #define BR_MRP_LOST_IN_CONT BIT(19) #define BR_TX_FWD_OFFLOAD BIT(20) +#define BR_PORT_LOCKED BIT(21) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index e1ba2d51b717..be09d2ad4b5d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -537,6 +537,7 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index b50382f957c1..e0c13fcc50ed 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -81,6 +81,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (!p || p->state == BR_STATE_DISABLED) goto drop; + br = p->br; brmctx = &p->br->multicast_ctx; pmctx = &p->multicast_ctx; state = p->state; @@ -88,10 +89,18 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb &state, &vlan)) goto out; + if (p->flags & BR_PORT_LOCKED) { + struct net_bridge_fdb_entry *fdb_src = + br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid); + + if (!fdb_src || READ_ONCE(fdb_src->dst) != p || + test_bit(BR_FDB_LOCAL, &fdb_src->flags)) + goto drop; + } + nbp_switchdev_frame_mark(p, skb); /* insert into forwarding database after filtering to avoid spoofing */ - br = p->br; if (p->flags & BR_LEARNING) br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c 
index 2ff83d84230d..7d4432ca9a20 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -184,6 +184,7 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */ + nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */ + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */ + + nla_total_size(1) /* IFLA_BRPORT_LOCKED */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */ + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */ + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */ @@ -269,7 +270,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, BR_MRP_LOST_CONT)) || nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN, !!(p->flags & BR_MRP_LOST_IN_CONT)) || - nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED))) + nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) || + nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED))) return -EMSGSIZE; timerval = br_timer_value(&p->message_age_timer); @@ -827,6 +829,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 }, [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 }, [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 }, + [IFLA_BRPORT_LOCKED] = { .type = NLA_U8 }, [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 }, [IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 }, }; @@ -893,6 +896,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[], br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL); br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS); br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED); + br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED); changed_mask = old_flags ^ p->flags; -- cgit v1.2.3-71-gd317 From 1241e329ce2e1f5b1039fd356b75867b29721ad2 Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Date: Wed, 23 Feb 2022 00:09:12 +0530 Subject: ethtool: add support to set/get completion queue event size Add support to set completion queue event size via ethtool -G parameter and get it via ethtool -g parameter. 
~ # ./ethtool -G eth0 cqe-size 512 ~ # ./ethtool -g eth0 Ring parameters for eth0: Pre-set maximums: RX: 1048576 RX Mini: n/a RX Jumbo: n/a TX: 1048576 Current hardware settings: RX: 256 RX Mini: n/a RX Jumbo: n/a TX: 4096 RX Buf Len: 2048 CQE Size: 128 Signed-off-by: Subbaraya Sundeep Signed-off-by: Sunil Goutham Signed-off-by: Jakub Kicinski --- Documentation/networking/ethtool-netlink.rst | 11 +++++++++++ include/linux/ethtool.h | 4 ++++ include/uapi/linux/ethtool_netlink.h | 1 + net/ethtool/netlink.h | 2 +- net/ethtool/rings.c | 19 +++++++++++++++++-- 5 files changed, 34 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index cae28af7a476..24d9be69065d 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -861,6 +861,7 @@ Kernel response contents: ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` u8 TCP header / data split + ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE ==================================== ====== =========================== ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with @@ -885,6 +886,7 @@ Request contents: ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring ``ETHTOOL_A_RINGS_RX_BUF_LEN`` u32 size of buffers on the ring + ``ETHTOOL_A_RINGS_CQE_SIZE`` u32 Size of TX/RX CQE ==================================== ====== =========================== The kernel checks that requested ring sizes do not exceed limits reported by the driver. The driver may impose additional constraints and may not support all attributes. +``ETHTOOL_A_RINGS_CQE_SIZE`` specifies the completion queue event size. +Completion queue events (CQE) are the events posted by the NIC to indicate the +completion status of a packet when the packet is sent (like send success or +error) or received (like pointers to packet fragments). The CQE size parameter +makes it possible to use a CQE size other than the default if the NIC supports it. +A bigger CQE can hold more receive buffer pointers; in turn the NIC can transfer +a bigger frame from the wire. Based on the NIC hardware, the overall completion +queue size can be adjusted in the driver if the CQE size is modified. + CHANNELS_GET ============ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e0853f48b75e..4af58459a1e7 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -71,18 +71,22 @@ enum { * struct kernel_ethtool_ringparam - RX/TX ring configuration * @rx_buf_len: Current length of buffers on the rx ring. 
* @tcp_data_split: Scatter packet headers and data to separate buffers + * @cqe_size: Size of TX/RX completion queue event */ struct kernel_ethtool_ringparam { u32 rx_buf_len; u8 tcp_data_split; + u32 cqe_size; }; /** * enum ethtool_supported_ring_param - indicator caps for setting ring params * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len + * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size */ enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), + ETHTOOL_RING_USE_CQE_SIZE = BIT(1), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 417d4280d7b5..979850221b8d 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -337,6 +337,7 @@ enum { ETHTOOL_A_RINGS_TX, /* u32 */ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ + ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 75856db299e9..29d01662a48b 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -363,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1]; extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1]; extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1]; -extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_BUF_LEN + 1]; +extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_CQE_SIZE + 1]; extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1]; extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1]; extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1]; diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 18a5035d3bee..9f33c9689b56 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -54,7 +54,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */ nla_total_size(sizeof(u32)) + /* _RINGS_TX */ nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ - nla_total_size(sizeof(u8)); /* _RINGS_TCP_DATA_SPLIT */ + nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */ + nla_total_size(sizeof(u32)); /* _RINGS_CQE_SIZE */ } static int rings_fill_reply(struct sk_buff *skb, @@ -91,7 +92,9 @@ static int rings_fill_reply(struct sk_buff *skb, (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) || (kr->tcp_data_split && (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, - kr->tcp_data_split)))) + kr->tcp_data_split))) || + (kr->cqe_size && + (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size)))) return -EMSGSIZE; return 0; @@ -119,6 +122,7 @@ const struct nla_policy ethnl_rings_set_policy[] = { [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 }, [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1), + [ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1), }; int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) @@ -159,6 +163,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod); ethnl_update_u32(&kernel_ringparam.rx_buf_len, 
tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod); + ethnl_update_u32(&kernel_ringparam.cqe_size, + tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod); ret = 0; if (!mod) goto out_ops; @@ -190,6 +196,15 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info) goto out_ops; } + if (kernel_ringparam.cqe_size && + !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) { + ret = -EOPNOTSUPP; + NL_SET_ERR_MSG_ATTR(info->extack, + tb[ETHTOOL_A_RINGS_CQE_SIZE], + "setting cqe size not supported"); + goto out_ops; + } + ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, &kernel_ringparam, info->extack); if (ret < 0) -- cgit v1.2.3-71-gd317 From 28a3f0601727d521a1c6cce62ecbcb7402a9e4f5 Mon Sep 17 00:00:00 2001 From: Toms Atteka Date: Wed, 23 Feb 2022 16:54:09 -0800 Subject: net: openvswitch: IPv6: Add IPv6 extension header support This change adds a new OpenFlow field OFPXMT_OFB_IPV6_EXTHDR and packets can be filtered using ipv6_ext flag. Signed-off-by: Toms Atteka Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 6 ++ net/openvswitch/flow.c | 140 +++++++++++++++++++++++++++++++++++++++ net/openvswitch/flow.h | 14 ++++ net/openvswitch/flow_netlink.c | 26 +++++++- 4 files changed, 184 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 150bcff49b1c..9d1710f20505 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -351,6 +351,7 @@ enum ovs_key_attr { OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, /* struct ovs_key_ct_tuple_ipv4 */ OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */ OVS_KEY_ATTR_NSH, /* Nested set of ovs_nsh_key_* */ + OVS_KEY_ATTR_IPV6_EXTHDRS, /* struct ovs_key_ipv6_exthdr */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ @@ -430,6 +431,11 @@ struct ovs_key_ipv6 { __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ }; +/* separate structure to support backward compatibility with older user space */ +struct ovs_key_ipv6_exthdrs { + __u16 hdrs; +}; + struct ovs_key_tcp { __be16 tcp_src; __be16 tcp_dst; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index f6cd24fd530c..8df73d86b968 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -241,6 +241,144 @@ static bool icmphdr_ok(struct sk_buff *skb) sizeof(struct icmphdr)); } +/** + * get_ipv6_ext_hdrs() - Parses packet and sets IPv6 extension header flags. + * + * @skb: buffer where extension header data starts in packet + * @nh: ipv6 header + * @ext_hdrs: flags are stored here + * + * OFPIEH12_UNREP is set if more than one of a given IPv6 extension header + * is unexpectedly encountered. (Two destination options headers may be + * expected and would not cause this bit to be set.) 
+ * + * OFPIEH12_UNSEQ is set if IPv6 extension headers were not in the order + * preferred (but not required) by RFC 2460: + * + * When more than one extension header is used in the same packet, it is + * recommended that those headers appear in the following order: + * IPv6 header + * Hop-by-Hop Options header + * Destination Options header + * Routing header + * Fragment header + * Authentication header + * Encapsulating Security Payload header + * Destination Options header + * upper-layer header + */ +static void get_ipv6_ext_hdrs(struct sk_buff *skb, struct ipv6hdr *nh, + u16 *ext_hdrs) +{ + u8 next_type = nh->nexthdr; + unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); + int dest_options_header_count = 0; + + *ext_hdrs = 0; + + while (ipv6_ext_hdr(next_type)) { + struct ipv6_opt_hdr _hdr, *hp; + + switch (next_type) { + case IPPROTO_NONE: + *ext_hdrs |= OFPIEH12_NONEXT; + /* stop parsing */ + return; + + case IPPROTO_ESP: + if (*ext_hdrs & OFPIEH12_ESP) + *ext_hdrs |= OFPIEH12_UNREP; + if ((*ext_hdrs & ~(OFPIEH12_HOP | OFPIEH12_DEST | + OFPIEH12_ROUTER | IPPROTO_FRAGMENT | + OFPIEH12_AUTH | OFPIEH12_UNREP)) || + dest_options_header_count >= 2) { + *ext_hdrs |= OFPIEH12_UNSEQ; + } + *ext_hdrs |= OFPIEH12_ESP; + break; + + case IPPROTO_AH: + if (*ext_hdrs & OFPIEH12_AUTH) + *ext_hdrs |= OFPIEH12_UNREP; + if ((*ext_hdrs & + ~(OFPIEH12_HOP | OFPIEH12_DEST | OFPIEH12_ROUTER | + IPPROTO_FRAGMENT | OFPIEH12_UNREP)) || + dest_options_header_count >= 2) { + *ext_hdrs |= OFPIEH12_UNSEQ; + } + *ext_hdrs |= OFPIEH12_AUTH; + break; + + case IPPROTO_DSTOPTS: + if (dest_options_header_count == 0) { + if (*ext_hdrs & + ~(OFPIEH12_HOP | OFPIEH12_UNREP)) + *ext_hdrs |= OFPIEH12_UNSEQ; + *ext_hdrs |= OFPIEH12_DEST; + } else if (dest_options_header_count == 1) { + if (*ext_hdrs & + ~(OFPIEH12_HOP | OFPIEH12_DEST | + OFPIEH12_ROUTER | OFPIEH12_FRAG | + OFPIEH12_AUTH | OFPIEH12_ESP | + OFPIEH12_UNREP)) { + *ext_hdrs |= OFPIEH12_UNSEQ; + } + } else { + *ext_hdrs |= OFPIEH12_UNREP; + } + dest_options_header_count++; + break; + + case IPPROTO_FRAGMENT: + if (*ext_hdrs & OFPIEH12_FRAG) + *ext_hdrs |= OFPIEH12_UNREP; + if ((*ext_hdrs & ~(OFPIEH12_HOP | + OFPIEH12_DEST | + OFPIEH12_ROUTER | + OFPIEH12_UNREP)) || + dest_options_header_count >= 2) { + *ext_hdrs |= OFPIEH12_UNSEQ; + } + *ext_hdrs |= OFPIEH12_FRAG; + break; + + case IPPROTO_ROUTING: + if (*ext_hdrs & OFPIEH12_ROUTER) + *ext_hdrs |= OFPIEH12_UNREP; + if ((*ext_hdrs & ~(OFPIEH12_HOP | + OFPIEH12_DEST | + OFPIEH12_UNREP)) || + dest_options_header_count >= 2) { + *ext_hdrs |= OFPIEH12_UNSEQ; + } + *ext_hdrs |= OFPIEH12_ROUTER; + break; + + case IPPROTO_HOPOPTS: + if (*ext_hdrs & OFPIEH12_HOP) + *ext_hdrs |= OFPIEH12_UNREP; + /* OFPIEH12_HOP is set to 1 if a hop-by-hop IPv6 + * extension header is present as the first + * extension header in the packet. 
+ */ + if (*ext_hdrs == 0) + *ext_hdrs |= OFPIEH12_HOP; + else + *ext_hdrs |= OFPIEH12_UNSEQ; + break; + + default: + return; + } + + hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); + if (!hp) + break; + next_type = hp->nexthdr; + start += ipv6_optlen(hp); + }; +} + static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) { unsigned short frag_off; @@ -256,6 +394,8 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) nh = ipv6_hdr(skb); + get_ipv6_ext_hdrs(skb, nh, &key->ipv6.exthdrs); + key->ip.proto = NEXTHDR_NONE; key->ip.tos = ipv6_get_dsfield(nh); key->ip.ttl = nh->hop_limit; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 758a8c77f736..073ab73ffeaa 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -32,6 +32,19 @@ enum sw_flow_mac_proto { #define SW_FLOW_KEY_INVALID 0x80 #define MPLS_LABEL_DEPTH 3 +/* Bit definitions for IPv6 Extension Header pseudo-field. */ +enum ofp12_ipv6exthdr_flags { + OFPIEH12_NONEXT = 1 << 0, /* "No next header" encountered. */ + OFPIEH12_ESP = 1 << 1, /* Encrypted Sec Payload header present. */ + OFPIEH12_AUTH = 1 << 2, /* Authentication header present. */ + OFPIEH12_DEST = 1 << 3, /* 1 or 2 dest headers present. */ + OFPIEH12_FRAG = 1 << 4, /* Fragment header present. */ + OFPIEH12_ROUTER = 1 << 5, /* Router header present. */ + OFPIEH12_HOP = 1 << 6, /* Hop-by-hop header present. */ + OFPIEH12_UNREP = 1 << 7, /* Unexpected repeats encountered. */ + OFPIEH12_UNSEQ = 1 << 8 /* Unexpected sequencing encountered. */ +}; + /* Store options at the end of the array if they are less than the * maximum size. This allows us to get the benefits of variable length * matching for small options. @@ -121,6 +134,7 @@ struct sw_flow_key { struct in6_addr dst; /* IPv6 destination address. */ } addr; __be32 label; /* IPv6 flow label. */ + u16 exthdrs; /* IPv6 extension header flags */ union { struct { struct in6_addr src; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index fd1f809e9bc1..8b4124820f7d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -346,7 +346,7 @@ size_t ovs_key_attr_size(void) /* Whenever adding new OVS_KEY_ FIELDS, we should consider * updating this function. 
*/ - BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 29); + BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 30); return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ @@ -369,7 +369,8 @@ size_t ovs_key_attr_size(void) + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ - + nla_total_size(28); /* OVS_KEY_ATTR_ND */ + + nla_total_size(28) /* OVS_KEY_ATTR_ND */ + + nla_total_size(2); /* OVS_KEY_ATTR_IPV6_EXTHDRS */ } static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = { @@ -437,6 +438,8 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) }, [OVS_KEY_ATTR_NSH] = { .len = OVS_ATTR_NESTED, .next = ovs_nsh_key_attr_lens, }, + [OVS_KEY_ATTR_IPV6_EXTHDRS] = { + .len = sizeof(struct ovs_key_ipv6_exthdrs) }, }; static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) @@ -1597,6 +1600,17 @@ static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match, attrs &= ~(1 << OVS_KEY_ATTR_IPV6); } + if (attrs & (1ULL << OVS_KEY_ATTR_IPV6_EXTHDRS)) { + const struct ovs_key_ipv6_exthdrs *ipv6_exthdrs_key; + + ipv6_exthdrs_key = nla_data(a[OVS_KEY_ATTR_IPV6_EXTHDRS]); + + SW_FLOW_KEY_PUT(match, ipv6.exthdrs, + ipv6_exthdrs_key->hdrs, is_mask); + + attrs &= ~(1ULL << OVS_KEY_ATTR_IPV6_EXTHDRS); + } + if (attrs & (1 << OVS_KEY_ATTR_ARP)) { const struct ovs_key_arp *arp_key; @@ -2099,6 +2113,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey, ipv4_key->ipv4_frag = output->ip.frag; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { struct ovs_key_ipv6 *ipv6_key; + struct ovs_key_ipv6_exthdrs *ipv6_exthdrs_key; nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key)); if (!nla) @@ -2113,6 +2128,13 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey, ipv6_key->ipv6_tclass = output->ip.tos; ipv6_key->ipv6_hlimit = output->ip.ttl; ipv6_key->ipv6_frag = output->ip.frag; + + nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6_EXTHDRS, + sizeof(*ipv6_exthdrs_key)); + if (!nla) + goto nla_put_failure; + ipv6_exthdrs_key = nla_data(nla); + ipv6_exthdrs_key->hdrs = output->ipv6.exthdrs; } else if (swkey->eth.type == htons(ETH_P_NSH)) { if (nsh_key_to_nlattr(&output->nsh, is_mask, skb)) goto nla_put_failure; -- cgit v1.2.3-71-gd317 From ba7bb663f5547ef474c98df99a97bb4a13c5715f Mon Sep 17 00:00:00 2001 From: David Dunn Date: Wed, 23 Feb 2022 22:57:41 +0000 Subject: KVM: x86: Provide per VM capability for disabling PMU virtualization Add a new capability, KVM_CAP_PMU_CAPABILITY, that takes a bitmask of settings/features to allow userspace to configure PMU virtualization on a per-VM basis. For now, support a single flag, KVM_PMU_CAP_DISABLE, to allow disabling PMU virtualization for a VM even when KVM is configured with enable_pmu=true at the module level. To keep KVM simple, disallow changing a VM's PMU configuration after vCPUs have been created. 
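For illustration (hypothetical VMM-side snippet, not part of this patch), userspace would use the capability roughly as follows:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Disable PMU virtualization for one VM; must be called before
     * any KVM_CREATE_VCPU ioctl on that VM. */
    static int vm_disable_pmu(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_PMU_CAPABILITY,
                    .args = { KVM_PMU_CAP_DISABLE },
            };
            int supported;

            supported = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PMU_CAPABILITY);
            if (supported < 0 || !(supported & KVM_PMU_CAP_DISABLE))
                    return -1; /* not supported */

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }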
Signed-off-by: David Dunn Message-Id: <20220223225743.2703915-2-daviddunn@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 22 ++++++++++++++++++++++ arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm/pmu.c | 2 +- arch/x86/kvm/vmx/pmu_intel.c | 2 +- arch/x86/kvm/x86.c | 18 ++++++++++++++++++ include/uapi/linux/kvm.h | 3 +++ tools/include/uapi/linux/kvm.h | 4 ++++ 7 files changed, 50 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index c3e68c1531f0..f5d011351016 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -7643,3 +7643,25 @@ The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace the hypercalls whose corresponding bit is in the argument, and return ENOSYS for the others. + +8.35 KVM_CAP_PMU_CAPABILITY +--------------------------- + +:Capability: KVM_CAP_PMU_CAPABILITY +:Architectures: x86 +:Type: vm +:Parameters: arg[0] is a bitmask of PMU virtualization capabilities. +:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits + +This capability alters PMU virtualization in KVM. + +Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of +PMU virtualization capabilities that can be adjusted on a VM. + +The argument to KVM_ENABLE_CAP is also a bitmask and selects specific +PMU virtualization capabilities to be applied to the VM. This can +only be invoked on a VM prior to the creation of VCPUs. + +At this time, KVM_PMU_CAP_DISABLE is the only capability. Setting +this capability will disable PMU virtualization for that VM. Usermode +should adjust CPUID leaf 0xA to reflect that the PMU is disabled. 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 463171e3b613..dafb5a6220cd 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1147,6 +1147,7 @@ struct kvm_arch { bool exception_payload_enabled; bool bus_lock_detection_enabled; + bool enable_pmu; /* * If exit_on_emulation_error is set, and the in-kernel instruction * emulator fails to emulate an instruction, allow userspace diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 5aa45f13b16d..d4de52409335 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -101,7 +101,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, { struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); - if (!enable_pmu) + if (!vcpu->kvm->arch.enable_pmu) return NULL; switch (msr) { diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 03fab48b149c..4e5b1eeeb77c 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -487,7 +487,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->reserved_bits = 0xffffffff00200000ull; entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); - if (!entry || !enable_pmu) + if (!entry || !vcpu->kvm->arch.enable_pmu) return; eax.full = entry->eax; edx.full = entry->edx; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 16d29d41908f..0b95c379e234 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -110,6 +110,8 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) +#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE + #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) @@ -4330,6 +4332,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) if (r < sizeof(struct kvm_xsave)) r = sizeof(struct kvm_xsave); break; + case KVM_CAP_PMU_CAPABILITY: + r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; + break; } default: break; @@ -6004,6 +6009,18 @@ split_irqchip_unlock: kvm->arch.exit_on_emulation_error = cap->args[0]; r = 0; break; + case KVM_CAP_PMU_CAPABILITY: + r = -EINVAL; + if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK)) + break; + + mutex_lock(&kvm->lock); + if (!kvm->created_vcpus) { + kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); + r = 0; + } + mutex_unlock(&kvm->lock); + break; default: r = -EINVAL; break; @@ -11586,6 +11603,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); kvm->arch.guest_can_read_msr_platform_info = true; + kvm->arch.enable_pmu = enable_pmu; #if IS_ENABLED(CONFIG_HYPERV) spin_lock_init(&kvm->arch.hv_root_tdp_lock); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a02bbf8fd0f6..d2f1efc3aa35 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1142,6 +1142,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SYS_ATTRIBUTES 209 #define KVM_CAP_PPC_AIL_MODE_3 210 #define KVM_CAP_S390_MEM_OP_EXTENSION 211 +#define KVM_CAP_PMU_CAPABILITY 212 #ifdef KVM_CAP_IRQ_ROUTING @@ -1978,6 +1979,8 @@ struct kvm_dirty_gfn { #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) +#define KVM_PMU_CAP_DISABLE (1 << 0) + /** * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. * @flags: Some extra information for header, always 0 for now. 
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 507ee1f2aa96..bbc6b7c2dc1b 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1135,6 +1135,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_XSAVE2 208 #define KVM_CAP_SYS_ATTRIBUTES 209 #define KVM_CAP_PPC_AIL_MODE_3 210 +#define KVM_CAP_S390_MEM_OP_EXTENSION 211 +#define KVM_CAP_PMU_CAPABILITY 212 #ifdef KVM_CAP_IRQ_ROUTING @@ -1971,6 +1973,8 @@ struct kvm_dirty_gfn { #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) +#define KVM_PMU_CAP_DISABLE (1 << 0) + /** * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. * @flags: Some extra information for header, always 0 for now. -- cgit v1.2.3-71-gd317 From 43245eca6e670ebf65908b549641c1460a9cc944 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 2 Feb 2022 17:55:02 -0500 Subject: NFSv4.1 support for NFS4_RESULT_PRESERVE_UNLINKED In 4.1+, the server is allowed to set a flag NFS4_RESULT_PRESERVE_UNLINKED in reply to the OPEN, which tells the client that it does not need to do a silly rename of an opened file when it's being removed. Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 10 ++++++++-- fs/nfs/nfs4proc.c | 2 ++ include/linux/nfs_fs.h | 1 + include/uapi/linux/nfs4.h | 1 + 4 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index fbb4a522d716..8b190c8e4a45 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1419,7 +1419,12 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) if (flags & LOOKUP_REVAL) goto out_force; out: - return (inode->i_nlink == 0) ? -ESTALE : 0; + if (inode->i_nlink > 0 || + (inode->i_nlink == 0 && + test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(inode)->flags))) + return 0; + else + return -ESTALE; out_force: if (flags & LOOKUP_RCU) return -ECHILD; @@ -2330,7 +2335,8 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry) trace_nfs_unlink_enter(dir, dentry); spin_lock(&dentry->d_lock); - if (d_count(dentry) > 1) { + if (d_count(dentry) > 1 && !test_bit(NFS_INO_PRESERVE_UNLINKED, + &NFS_I(d_inode(dentry))->flags)) { spin_unlock(&dentry->d_lock); /* Start asynchronous writeout of the inode */ write_inode_now(d_inode(dentry), 0); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b3793b82a5e7..73a9b6de666c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3050,6 +3050,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); + if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) + set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); dentry = opendata->dentry; if (d_really_is_negative(dentry)) { diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 333ea05e2531..0e79dbbc759a 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -277,6 +277,7 @@ struct nfs4_copy_state { #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ +#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FORCE_READDIR (7) /* force readdirplus */ #define NFS_INO_LAYOUTCOMMIT
(9) /* layoutcommit required */ diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 800bb0ffa6e6..1d2043708bf1 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -45,6 +45,7 @@ #define NFS4_OPEN_RESULT_CONFIRM 0x0002 #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 +#define NFS4_OPEN_RESULT_PRESERVE_UNLINKED 0x0008 #define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020 #define NFS4_SHARE_ACCESS_MASK 0x000F -- cgit v1.2.3-71-gd317 From 9e8d5470325f25bed7d33f9faaae6d5e4f313650 Mon Sep 17 00:00:00 2001 From: Hammer Hsieh Date: Tue, 22 Feb 2022 17:36:03 +0800 Subject: serial: sunplus-uart: Add Sunplus SoC UART Driver Add the Sunplus SoC UART driver. The SP7021 UART block contains five UARTs, UART0 to UART4; the supported features are listed below. Full-duplex communication. Configurable data packet length. Configurable number of stop bits. Forced break condition. Configurable baud rate. Error detection and reporting. Configurable RXD noise rejection vote. The UART0 pinout exposes only the two pins TX/RX. The UART1 to UART4 pinouts expose the four pins TX/RX/CTS/RTS. UART0 is normally used for the kernel console, but it can also serve as a regular UART. Setting "console=ttySUP0,115200" on the kernel command line selects it; SUP stands for Sunplus UART Port. On probe, the driver creates device nodes named "/dev/ttySUPx". https://sunplus.atlassian.net/wiki/spaces/doc/pages/1873412290/13.+Universal+Asynchronous+Receiver+Transmitter+UART Signed-off-by: Hammer Hsieh Link: https://lore.kernel.org/r/1645522563-17183-3-git-send-email-hammerh0314@gmail.com Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 1 + drivers/tty/serial/Kconfig | 25 ++ drivers/tty/serial/Makefile | 1 + drivers/tty/serial/sunplus-uart.c | 770 ++++++++++++++++++++++++++++++++++++++ include/uapi/linux/serial_core.h | 3 + 5 files changed, 800 insertions(+) create mode 100644 drivers/tty/serial/sunplus-uart.c (limited to 'include/uapi/linux') diff --git a/MAINTAINERS b/MAINTAINERS index 5c39f47af44f..4a30001f6d7b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18553,6 +18553,7 @@ SUNPLUS UART DRIVER M: Hammer Hsieh S: Maintained F: Documentation/devicetree/bindings/serial/sunplus,sp7021-uart.yaml +F: drivers/tty/serial/sunplus-uart.c SUPERH M: Yoshinori Sato diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 66916397a19c..e952ec5c7a7c 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1564,6 +1564,31 @@ config SERIAL_LITEUART_CONSOLE and warnings and which allows logins in single user mode). Otherwise, say 'N'. +config SERIAL_SUNPLUS + tristate "Sunplus UART support" + depends on OF || COMPILE_TEST + select SERIAL_CORE + help + Select this option if you would like to use the Sunplus serial port + on the Sunplus SoC SP7021. + If you enable this option, Sunplus serial ports in the system will + be registered as ttySUPx. + This driver can also be built as a module. If so, the module will be + called sunplus-uart. + +config SERIAL_SUNPLUS_CONSOLE + bool "Console on Sunplus UART" + depends on SERIAL_SUNPLUS + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + Select this option if you would like to use a Sunplus UART as the + system console. + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttySUPx".
+ endmenu config SERIAL_MCTRL_GPIO diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 7da0856cd198..61cc8de95571 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -87,6 +87,7 @@ obj-$(CONFIG_SERIAL_RDA) += rda-uart.o obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o +obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o # GPIOLIB helpers for modem control lines obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c new file mode 100644 index 000000000000..450c8e75abb4 --- /dev/null +++ b/drivers/tty/serial/sunplus-uart.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sunplus SoC UART driver + * + * Author: Hammer Hsieh + * + * Note1: This driver is 8250-like uart, but are not register compatible. + * + * Note2: On some buses, for preventing data incoherence, must do a read + * for ensure write made it to hardware. In this driver, function startup + * and shutdown did not do a read but only do a write directly. For what? + * In Sunplus bus communication between memory bus and peripheral bus with + * posted write, it will send a specific command after last write command + * to make sure write done. Then memory bus identify the specific command + * and send done signal back to master device. After master device received + * done signal, then proceed next write command. It is no need to do a read + * before write. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register offsets */ +#define SUP_UART_DATA 0x00 +#define SUP_UART_LSR 0x04 +#define SUP_UART_MSR 0x08 +#define SUP_UART_LCR 0x0C +#define SUP_UART_MCR 0x10 +#define SUP_UART_DIV_L 0x14 +#define SUP_UART_DIV_H 0x18 +#define SUP_UART_ISC 0x1C +#define SUP_UART_TX_RESIDUE 0x20 +#define SUP_UART_RX_RESIDUE 0x24 + +/* Line Status Register bits */ +#define SUP_UART_LSR_BC BIT(5) /* break condition status */ +#define SUP_UART_LSR_FE BIT(4) /* frame error status */ +#define SUP_UART_LSR_OE BIT(3) /* overrun error status */ +#define SUP_UART_LSR_PE BIT(2) /* parity error status */ +#define SUP_UART_LSR_RX BIT(1) /* 1: receive fifo not empty */ +#define SUP_UART_LSR_TX BIT(0) /* 1: transmit fifo is not full */ +#define SUP_UART_LSR_TX_NOT_FULL 1 +#define SUP_UART_LSR_BRK_ERROR_BITS GENMASK(5, 2) + +/* Line Control Register bits */ +#define SUP_UART_LCR_SBC BIT(5) /* select break condition */ + +/* Modem Control Register bits */ +#define SUP_UART_MCR_RI BIT(3) /* ring indicator */ +#define SUP_UART_MCR_DCD BIT(2) /* data carrier detect */ + +/* Interrupt Status/Control Register bits */ +#define SUP_UART_ISC_RXM BIT(5) /* RX interrupt enable */ +#define SUP_UART_ISC_TXM BIT(4) /* TX interrupt enable */ +#define SUP_UART_ISC_RX BIT(1) /* RX interrupt status */ +#define SUP_UART_ISC_TX BIT(0) /* TX interrupt status */ + +#define SUP_DUMMY_READ BIT(16) /* drop bytes received on a !CREAD port */ +#define SUP_UART_NR 5 + +struct sunplus_uart_port { + struct uart_port port; + struct clk *clk; + struct reset_control *rstc; +}; + +static void sp_uart_put_char(struct uart_port *port, unsigned int ch) +{ + writel(ch, port->membase + SUP_UART_DATA); +} + +static u32 sunplus_tx_buf_not_full(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + + return (lsr & SUP_UART_LSR_TX) ? 
SUP_UART_LSR_TX_NOT_FULL : 0; +} + +static unsigned int sunplus_tx_empty(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + + return (lsr & UART_LSR_TEMT) ? TIOCSER_TEMT : 0; +} + +static void sunplus_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int mcr = readl(port->membase + SUP_UART_MCR); + + if (mctrl & TIOCM_DTR) + mcr |= UART_MCR_DTR; + else + mcr &= ~UART_MCR_DTR; + + if (mctrl & TIOCM_RTS) + mcr |= UART_MCR_RTS; + else + mcr &= ~UART_MCR_RTS; + + if (mctrl & TIOCM_CAR) + mcr |= SUP_UART_MCR_DCD; + else + mcr &= ~SUP_UART_MCR_DCD; + + if (mctrl & TIOCM_RI) + mcr |= SUP_UART_MCR_RI; + else + mcr &= ~SUP_UART_MCR_RI; + + if (mctrl & TIOCM_LOOP) + mcr |= UART_MCR_LOOP; + else + mcr &= ~UART_MCR_LOOP; + + writel(mcr, port->membase + SUP_UART_MCR); +} + +static unsigned int sunplus_get_mctrl(struct uart_port *port) +{ + unsigned int mcr, ret = 0; + + mcr = readl(port->membase + SUP_UART_MCR); + + if (mcr & UART_MCR_DTR) + ret |= TIOCM_DTR; + + if (mcr & UART_MCR_RTS) + ret |= TIOCM_RTS; + + if (mcr & SUP_UART_MCR_DCD) + ret |= TIOCM_CAR; + + if (mcr & SUP_UART_MCR_RI) + ret |= TIOCM_RI; + + if (mcr & UART_MCR_LOOP) + ret |= TIOCM_LOOP; + + return ret; +} + +static void sunplus_stop_tx(struct uart_port *port) +{ + unsigned int isc; + + isc = readl(port->membase + SUP_UART_ISC); + isc &= ~SUP_UART_ISC_TXM; + writel(isc, port->membase + SUP_UART_ISC); +} + +static void sunplus_start_tx(struct uart_port *port) +{ + unsigned int isc; + + isc = readl(port->membase + SUP_UART_ISC); + isc |= SUP_UART_ISC_TXM; + writel(isc, port->membase + SUP_UART_ISC); +} + +static void sunplus_stop_rx(struct uart_port *port) +{ + unsigned int isc; + + isc = readl(port->membase + SUP_UART_ISC); + isc &= ~SUP_UART_ISC_RXM; + writel(isc, port->membase + SUP_UART_ISC); +} + +static void sunplus_break_ctl(struct uart_port *port, int ctl) +{ + unsigned long flags; + unsigned int lcr; + + spin_lock_irqsave(&port->lock, flags); + + lcr = readl(port->membase + SUP_UART_LCR); + + if (ctl) + lcr |= SUP_UART_LCR_SBC; /* start break */ + else + lcr &= ~SUP_UART_LCR_SBC; /* stop break */ + + writel(lcr, port->membase + SUP_UART_LCR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void transmit_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + + if (port->x_char) { + sp_uart_put_char(port, port->x_char); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + sunplus_stop_tx(port); + return; + } + + do { + sp_uart_put_char(port, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; + port->icount.tx++; + + if (uart_circ_empty(xmit)) + break; + } while (sunplus_tx_buf_not_full(port)); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + sunplus_stop_tx(port); +} + +static void receive_chars(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + unsigned int ch, flag; + + do { + ch = readl(port->membase + SUP_UART_DATA); + flag = TTY_NORMAL; + port->icount.rx++; + + if (unlikely(lsr & SUP_UART_LSR_BRK_ERROR_BITS)) { + if (lsr & SUP_UART_LSR_BC) { + lsr &= ~(SUP_UART_LSR_FE | SUP_UART_LSR_PE); + port->icount.brk++; + flag = TTY_BREAK; + if (uart_handle_break(port)) + goto ignore_char; + } else if (lsr & SUP_UART_LSR_PE) { + port->icount.parity++; + flag = TTY_PARITY; + } else if (lsr & SUP_UART_LSR_FE) { + port->icount.frame++; + flag = TTY_FRAME; + } + + 
if (lsr & SUP_UART_LSR_OE) + port->icount.overrun++; + } + + if (port->ignore_status_mask & SUP_DUMMY_READ) + goto ignore_char; + + if (uart_handle_sysrq_char(port, ch)) + goto ignore_char; + + uart_insert_char(port, lsr, SUP_UART_LSR_OE, ch, flag); + +ignore_char: + lsr = readl(port->membase + SUP_UART_LSR); + } while (lsr & SUP_UART_LSR_RX); + + tty_flip_buffer_push(&port->state->port); +} + +static irqreturn_t sunplus_uart_irq(int irq, void *args) +{ + struct uart_port *port = args; + unsigned int isc; + + spin_lock(&port->lock); + + isc = readl(port->membase + SUP_UART_ISC); + + if (isc & SUP_UART_ISC_RX) + receive_chars(port); + + if (isc & SUP_UART_ISC_TX) + transmit_chars(port); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + +static int sunplus_startup(struct uart_port *port) +{ + unsigned long flags; + unsigned int isc = 0; + int ret; + + ret = request_irq(port->irq, sunplus_uart_irq, 0, "sunplus_uart", port); + if (ret) + return ret; + + spin_lock_irqsave(&port->lock, flags); + /* isc define Bit[7:4] int setting, Bit[3:0] int status + * isc register will clean Bit[3:0] int status after read + * only do a write to Bit[7:4] int setting + */ + isc |= SUP_UART_ISC_RXM; + writel(isc, port->membase + SUP_UART_ISC); + spin_unlock_irqrestore(&port->lock, flags); + + return 0; +} + +static void sunplus_shutdown(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + /* isc define Bit[7:4] int setting, Bit[3:0] int status + * isc register will clean Bit[3:0] int status after read + * only do a write to Bit[7:4] int setting + */ + writel(0, port->membase + SUP_UART_ISC); /* disable all interrupt */ + spin_unlock_irqrestore(&port->lock, flags); + + free_irq(port->irq, port); +} + +static void sunplus_set_termios(struct uart_port *port, + struct ktermios *termios, + struct ktermios *oldtermios) +{ + u32 ext, div, div_l, div_h, baud, lcr; + u32 clk = port->uartclk; + unsigned long flags; + + baud = uart_get_baud_rate(port, termios, oldtermios, 0, port->uartclk / 16); + + /* baud rate = uartclk / ((16 * divisor + 1) + divisor_ext) */ + clk += baud >> 1; + div = clk / baud; + ext = div & 0x0F; + div = (div >> 4) - 1; + div_l = (div & 0xFF) | (ext << 12); + div_h = div >> 8; + + switch (termios->c_cflag & CSIZE) { + case CS5: + lcr = UART_LCR_WLEN5; + break; + case CS6: + lcr = UART_LCR_WLEN6; + break; + case CS7: + lcr = UART_LCR_WLEN7; + break; + default: + lcr = UART_LCR_WLEN8; + break; + } + + if (termios->c_cflag & CSTOPB) + lcr |= UART_LCR_STOP; + + if (termios->c_cflag & PARENB) { + lcr |= UART_LCR_PARITY; + + if (!(termios->c_cflag & PARODD)) + lcr |= UART_LCR_EPAR; + } + + spin_lock_irqsave(&port->lock, flags); + + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = 0; + if (termios->c_iflag & INPCK) + port->read_status_mask |= SUP_UART_LSR_PE | SUP_UART_LSR_FE; + + if (termios->c_iflag & (BRKINT | PARMRK)) + port->read_status_mask |= SUP_UART_LSR_BC; + + /* Characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SUP_UART_LSR_FE | SUP_UART_LSR_PE; + + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= SUP_UART_LSR_BC; + + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= SUP_UART_LSR_OE; + } + + /* Ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= SUP_DUMMY_READ; + /* flush rx data FIFO */ + writel(0, port->membase + SUP_UART_RX_RESIDUE); + } + + /* Settings for 
baud rate divisor and lcr */ + writel(div_h, port->membase + SUP_UART_DIV_H); + writel(div_l, port->membase + SUP_UART_DIV_L); + writel(lcr, port->membase + SUP_UART_LCR); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void sunplus_set_ldisc(struct uart_port *port, struct ktermios *termios) +{ + int new = termios->c_line; + + if (new == N_PPS) + port->flags |= UPF_HARDPPS_CD; + else + port->flags &= ~UPF_HARDPPS_CD; +} + +static const char *sunplus_type(struct uart_port *port) +{ + return port->type == PORT_SUNPLUS ? "sunplus_uart" : NULL; +} + +static void sunplus_config_port(struct uart_port *port, int type) +{ + if (type & UART_CONFIG_TYPE) + port->type = PORT_SUNPLUS; +} + +static int sunplus_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (ser->type != PORT_UNKNOWN && ser->type != PORT_SUNPLUS) + return -EINVAL; + + return 0; +} + +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE +static void wait_for_xmitr(struct uart_port *port) +{ + unsigned int val; + int ret; + + /* Wait while FIFO is full or timeout */ + ret = readl_poll_timeout_atomic(port->membase + SUP_UART_LSR, val, + (val & SUP_UART_LSR_TX), 1, 10000); + + if (ret == -ETIMEDOUT) { + dev_err(port->dev, "Timeout waiting while UART TX FULL\n"); + return; + } +} +#endif + +#ifdef CONFIG_CONSOLE_POLL +static void sunplus_poll_put_char(struct uart_port *port, unsigned char data) +{ + wait_for_xmitr(port); + sp_uart_put_char(port, data); +} + +static int sunplus_poll_get_char(struct uart_port *port) +{ + unsigned int lsr = readl(port->membase + SUP_UART_LSR); + + if (!(lsr & SUP_UART_LSR_RX)) + return NO_POLL_CHAR; + + return readl(port->membase + SUP_UART_DATA); +} +#endif + +static const struct uart_ops sunplus_uart_ops = { + .tx_empty = sunplus_tx_empty, + .set_mctrl = sunplus_set_mctrl, + .get_mctrl = sunplus_get_mctrl, + .stop_tx = sunplus_stop_tx, + .start_tx = sunplus_start_tx, + .stop_rx = sunplus_stop_rx, + .break_ctl = sunplus_break_ctl, + .startup = sunplus_startup, + .shutdown = sunplus_shutdown, + .set_termios = sunplus_set_termios, + .set_ldisc = sunplus_set_ldisc, + .type = sunplus_type, + .config_port = sunplus_config_port, + .verify_port = sunplus_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_put_char = sunplus_poll_put_char, + .poll_get_char = sunplus_poll_get_char, +#endif +}; + +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE +struct sunplus_uart_port *sunplus_console_ports[SUP_UART_NR]; + +static void sunplus_uart_console_putchar(struct uart_port *port, int ch) +{ + wait_for_xmitr(port); + sp_uart_put_char(port, ch); +} + +static void sunplus_console_write(struct console *co, + const char *s, + unsigned int count) +{ + unsigned long flags; + int locked = 1; + + local_irq_save(flags); + + if (sunplus_console_ports[co->index]->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&sunplus_console_ports[co->index]->port.lock); + else + spin_lock(&sunplus_console_ports[co->index]->port.lock); + + uart_console_write(&sunplus_console_ports[co->index]->port, s, count, + sunplus_uart_console_putchar); + + if (locked) + spin_unlock(&sunplus_console_ports[co->index]->port.lock); + + local_irq_restore(flags); +} + +static int __init sunplus_console_setup(struct console *co, char *options) +{ + struct sunplus_uart_port *sup; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= SUP_UART_NR) + return -EINVAL; + + sup = sunplus_console_ports[co->index]; + if (!sup) + return -ENODEV; + + if (options) + 
uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&sup->port, co, baud, parity, bits, flow); +} + +static struct uart_driver sunplus_uart_driver; +static struct console sunplus_uart_console = { + .name = "ttySUP", + .write = sunplus_console_write, + .device = uart_console_device, + .setup = sunplus_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sunplus_uart_driver +}; +#endif + +static struct uart_driver sunplus_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "sunplus_uart", + .dev_name = "ttySUP", + .major = TTY_MAJOR, + .minor = 64, + .nr = SUP_UART_NR, + .cons = &sunplus_uart_console, +}; + +static void sunplus_uart_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static void sunplus_uart_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + +static int sunplus_uart_probe(struct platform_device *pdev) +{ + struct sunplus_uart_port *sup; + struct uart_port *port; + struct resource *res; + int ret, irq; + + pdev->id = of_alias_get_id(pdev->dev.of_node, "serial"); + + if (pdev->id < 0 || pdev->id >= SUP_UART_NR) + return -EINVAL; + + sup = devm_kzalloc(&pdev->dev, sizeof(*sup), GFP_KERNEL); + if (!sup) + return -ENOMEM; + + sup->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(sup->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(sup->clk), "clk not found\n"); + + ret = clk_prepare_enable(sup->clk); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, sunplus_uart_disable_unprepare, sup->clk); + if (ret) + return ret; + + sup->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(sup->rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(sup->rstc), "rstc not found\n"); + + port = &sup->port; + + port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(port->membase)) + return dev_err_probe(&pdev->dev, PTR_ERR(port->membase), "membase not found\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + port->mapbase = res->start; + port->uartclk = clk_get_rate(sup->clk); + port->line = pdev->id; + port->irq = irq; + port->dev = &pdev->dev; + port->iotype = UPIO_MEM; + port->ops = &sunplus_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->fifosize = 128; + + ret = reset_control_deassert(sup->rstc); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, sunplus_uart_reset_control_assert, sup->rstc); + if (ret) + return ret; + +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE + sunplus_console_ports[sup->port.line] = sup; +#endif + + platform_set_drvdata(pdev, &sup->port); + + ret = uart_add_one_port(&sunplus_uart_driver, &sup->port); +#ifdef CONFIG_SERIAL_SUNPLUS_CONSOLE + if (ret) + sunplus_console_ports[sup->port.line] = NULL; +#endif + + return ret; +} + +static int sunplus_uart_remove(struct platform_device *pdev) +{ + struct sunplus_uart_port *sup = platform_get_drvdata(pdev); + + uart_remove_one_port(&sunplus_uart_driver, &sup->port); + + return 0; +} + +static int __maybe_unused sunplus_uart_suspend(struct device *dev) +{ + struct sunplus_uart_port *sup = dev_get_drvdata(dev); + + if (!uart_console(&sup->port)) + uart_suspend_port(&sunplus_uart_driver, &sup->port); + + return 0; +} + +static int __maybe_unused sunplus_uart_resume(struct device *dev) +{ + struct sunplus_uart_port *sup = dev_get_drvdata(dev); + + if (!uart_console(&sup->port)) + uart_resume_port(&sunplus_uart_driver, &sup->port); + + return 0; +} + +static const struct dev_pm_ops sunplus_uart_pm_ops = { + 
SET_SYSTEM_SLEEP_PM_OPS(sunplus_uart_suspend, sunplus_uart_resume) +}; + +static const struct of_device_id sp_uart_of_match[] = { + { .compatible = "sunplus,sp7021-uart" }, + {} +}; +MODULE_DEVICE_TABLE(of, sp_uart_of_match); + +static struct platform_driver sunplus_uart_platform_driver = { + .probe = sunplus_uart_probe, + .remove = sunplus_uart_remove, + .driver = { + .name = "sunplus_uart", + .of_match_table = sp_uart_of_match, + .pm = &sunplus_uart_pm_ops, + } +}; + +static int __init sunplus_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&sunplus_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&sunplus_uart_platform_driver); + if (ret) + uart_unregister_driver(&sunplus_uart_driver); + + return ret; +} +module_init(sunplus_uart_init); + +static void __exit sunplus_uart_exit(void) +{ + platform_driver_unregister(&sunplus_uart_platform_driver); + uart_unregister_driver(&sunplus_uart_driver); +} +module_exit(sunplus_uart_exit); + +#ifdef CONFIG_SERIAL_EARLYCON +static void sunplus_uart_putc(struct uart_port *port, int c) +{ + unsigned int val; + int ret; + + ret = readl_poll_timeout_atomic(port->membase + SUP_UART_LSR, val, + (val & UART_LSR_TEMT), 1, 10000); + if (ret) + return; + + writel(c, port->membase + SUP_UART_DATA); +} + +static void sunplus_uart_early_write(struct console *con, const char *s, unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, sunplus_uart_putc); +} + +static int __init +sunplus_uart_early_setup(struct earlycon_device *dev, const char *opt) +{ + if (!(dev->port.membase || dev->port.iobase)) + return -ENODEV; + + dev->con->write = sunplus_uart_early_write; + + return 0; +} +OF_EARLYCON_DECLARE(sunplus_uart, "sunplus,sp7021-uart", sunplus_uart_early_setup); +#endif + +MODULE_DESCRIPTION("Sunplus UART driver"); +MODULE_AUTHOR("Hammer Hsieh "); +MODULE_LICENSE("GPL v2"); diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 8885e69178bd..6faf502b7860 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -277,4 +277,7 @@ /* Freescale LINFlexD UART */ #define PORT_LINFLEXUART 122 +/* Sunplus UART */ +#define PORT_SUNPLUS 123 + #endif /* _UAPILINUX_SERIAL_CORE_H */ -- cgit v1.2.3-71-gd317 From c2faf737abfb10f88f2d2612d573e9edc3c42c37 Mon Sep 17 00:00:00 2001 From: Max Staudt Date: Fri, 11 Feb 2022 15:10:36 +0100 Subject: tty: Reserve ldisc 29 for development purposes It's handy to have an ldisc number free for out-of-tree testing. This way, a new ldisc can be developed on any running system, without having to recompile the kernel just to define a new number. This is the highest number (and also the last one) available under the old numbering scheme, so let's reserve it before it's too late. From now on, every new ldisc upstreamed will have to increment NR_LDISCS in lockstep with its addition to the table in tty.h. Signed-off-by: Max Staudt Link: https://lore.kernel.org/r/20220211141036.6403-1-max@enpas.org Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/tty.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h index a58deb3061eb..9d0f06bfbac3 100644 --- a/include/uapi/linux/tty.h +++ b/include/uapi/linux/tty.h @@ -6,8 +6,6 @@ * 'tty.h' defines some structures used by tty_io.c and some defines. 
*/ -#define NR_LDISCS 30 - /* line disciplines */ #define N_TTY 0 #define N_SLIP 1 @@ -39,5 +37,9 @@ #define N_SPEAKUP 26 /* Speakup communication with synths */ #define N_NULL 27 /* Null ldisc used for error handling */ #define N_MCTP 28 /* MCTP-over-serial */ +#define N_DEVELOPMENT 29 /* Manual out-of-tree testing */ + +/* Always the newest line discipline + 1 */ +#define NR_LDISCS 30 #endif /* _UAPI_LINUX_TTY_H */ -- cgit v1.2.3-71-gd317 From fad278388e01e3658a356118bed8ee2c2408d280 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 24 May 2021 21:15:15 -0700 Subject: media: omap3isp: Use struct_group() for memcpy() region In preparation for FORTIFY_SOURCE performing compile-time and run-time field bounds checking for memcpy(), memmove(), and memset(), avoid intentionally writing across neighboring fields. Wrap the target region in struct_group(). This additionally fixes a theoretical misalignment of the copy (since the size of "buf" changes between 64-bit and 32-bit, but this is likely never built for 64-bit). FWIW, I think this code is totally broken on 64-bit (which appears to not be a "real" build configuration): it would either always fail (with an uninitialized data->buf_size) or would cause corruption in userspace due to the copy_to_user() in the call path against an uninitialized data->buf value: omap3isp_stat_request_statistics_time32(...) struct omap3isp_stat_data data64; ... omap3isp_stat_request_statistics(stat, &data64); int omap3isp_stat_request_statistics(struct ispstat *stat, struct omap3isp_stat_data *data) ... buf = isp_stat_buf_get(stat, data); static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, struct omap3isp_stat_data *data) ... if (buf->buf_size > data->buf_size) { ... return ERR_PTR(-EINVAL); } ... rval = copy_to_user(data->buf, buf->virt_addr, buf->buf_size); Regardless, additionally initialize data64 to be zero-filled to avoid undefined behavior. Cc: Laurent Pinchart Cc: Mauro Carvalho Chehab Cc: Arnd Bergmann Cc: Sakari Ailus Cc: linux-media@vger.kernel.org Fixes: 378e3f81cb56 ("media: omap3isp: support 64-bit version of omap3isp_stat_data") Cc: stable@vger.kernel.org Reviewed-by: Gustavo A. R. 
Silva Link: https://lore.kernel.org/lkml/20211215220505.GB21862@embeddedor Signed-off-by: Kees Cook --- drivers/media/platform/omap3isp/ispstat.c | 5 +++-- include/uapi/linux/omap3isp.h | 21 +++++++++++++-------- 2 files changed, 16 insertions(+), 10 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c index 5b9b57f4d9bf..68cf68dbcace 100644 --- a/drivers/media/platform/omap3isp/ispstat.c +++ b/drivers/media/platform/omap3isp/ispstat.c @@ -512,7 +512,7 @@ int omap3isp_stat_request_statistics(struct ispstat *stat, int omap3isp_stat_request_statistics_time32(struct ispstat *stat, struct omap3isp_stat_data_time32 *data) { - struct omap3isp_stat_data data64; + struct omap3isp_stat_data data64 = { }; int ret; ret = omap3isp_stat_request_statistics(stat, &data64); @@ -521,7 +521,8 @@ int omap3isp_stat_request_statistics_time32(struct ispstat *stat, data->ts.tv_sec = data64.ts.tv_sec; data->ts.tv_usec = data64.ts.tv_usec; - memcpy(&data->buf, &data64.buf, sizeof(*data) - sizeof(data->ts)); + data->buf = (uintptr_t)data64.buf; + memcpy(&data->frame, &data64.frame, sizeof(data->frame)); return 0; } diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h index 87b55755f4ff..d9db7ad43890 100644 --- a/include/uapi/linux/omap3isp.h +++ b/include/uapi/linux/omap3isp.h @@ -162,6 +162,7 @@ struct omap3isp_h3a_aewb_config { * struct omap3isp_stat_data - Statistic data sent to or received from user * @ts: Timestamp of returned framestats. * @buf: Pointer to pass to user. + * @buf_size: Size of buffer. * @frame_number: Frame number of requested stats. * @cur_frame: Current frame number being processed. * @config_counter: Number of the configuration associated with the data. @@ -176,10 +177,12 @@ struct omap3isp_stat_data { struct timeval ts; #endif void __user *buf; - __u32 buf_size; - __u16 frame_number; - __u16 cur_frame; - __u16 config_counter; + __struct_group(/* no tag */, frame, /* no attrs */, + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; + ); }; #ifdef __KERNEL__ @@ -189,10 +192,12 @@ struct omap3isp_stat_data_time32 { __s32 tv_usec; } ts; __u32 buf; - __u32 buf_size; - __u16 frame_number; - __u16 cur_frame; - __u16 config_counter; + __struct_group(/* no tag */, frame, /* no attrs */, + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; + ); }; #endif -- cgit v1.2.3-71-gd317 From 89377bc1975c2993bde4a498a3a4e5817ac0ae2c Mon Sep 17 00:00:00 2001 From: Kanchan Joshi Date: Thu, 10 Feb 2022 11:07:55 +0530 Subject: nvme: add vectored-io support for user-passthrough Add a new NVME_IOCTL_IO64_CMD_VEC ioctl that works like the existing NVME_IOCTL_IO64_CMD ioctl except that it takes an array of iovecs and thus supports vectored I/O. - cmd.addr is base address of user iovec array - cmd.vec_cnt is count of iovec array elements This patch does not include a vectored variant for admin commands as most of them are light on buffers and likely to have low invocation frequency.
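A rough userspace sketch of the new interface (the namespace path, opcode, LBA and buffer sizes are illustrative only; it assumes a uapi header carrying NVME_IOCTL_IO64_CMD_VEC and the vec_cnt union member from this patch):

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0n1", O_RDONLY);
	struct iovec iov[2] = {
		{ .iov_base = malloc(4096), .iov_len = 4096 },
		{ .iov_base = malloc(4096), .iov_len = 4096 },
	};
	struct nvme_passthru_cmd64 cmd = {
		.opcode = 0x02,                 /* NVMe read */
		.nsid = 1,
		.addr = (__u64)(uintptr_t)iov,  /* base of the user iovec array */
		.vec_cnt = 2,                   /* number of iovec elements */
		.cdw10 = 0,                     /* starting LBA (low 32 bits) */
		.cdw12 = 15,                    /* 0's based: 16 blocks of 512B */
	};

	return ioctl(fd, NVME_IOCTL_IO64_CMD_VEC, &cmd);
}

The kernel side then walks the array via import_iovec() and blk_rq_map_user_iov(), as the ioctl.c hunk below shows.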
Signed-off-by: Kanchan Joshi Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- drivers/nvme/host/ioctl.c | 35 ++++++++++++++++++++++++++--------- include/uapi/linux/nvme_ioctl.h | 6 +++++- 2 files changed, 31 insertions(+), 10 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 22314962842d..aaf3dfad2657 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -56,7 +56,7 @@ out: static int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, void __user *ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, - u32 meta_seed, u64 *result, unsigned timeout) + u32 meta_seed, u64 *result, unsigned timeout, bool vec) { bool write = nvme_is_write(cmd); struct nvme_ns *ns = q->queuedata; @@ -75,8 +75,22 @@ static int nvme_submit_user_cmd(struct request_queue *q, nvme_req(req)->flags |= NVME_REQ_USERCMD; if (ubuffer && bufflen) { - ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, + if (!vec) + ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, GFP_KERNEL); + else { + struct iovec fast_iov[UIO_FASTIOV]; + struct iovec *iov = fast_iov; + struct iov_iter iter; + + ret = import_iovec(rq_data_dir(req), ubuffer, bufflen, + UIO_FASTIOV, &iov, &iter); + if (ret < 0) + goto out; + ret = blk_rq_map_user_iov(q, req, NULL, &iter, + GFP_KERNEL); + kfree(iov); + } if (ret) goto out; bio = req->bio; @@ -170,7 +184,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) return nvme_submit_user_cmd(ns->queue, &c, nvme_to_user_ptr(io.addr), length, - metadata, meta_len, lower_32_bits(io.slba), NULL, 0); + metadata, meta_len, lower_32_bits(io.slba), NULL, 0, + false); } static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, @@ -224,7 +239,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, nvme_to_user_ptr(cmd.addr), cmd.data_len, nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, - 0, &result, timeout); + 0, &result, timeout, false); if (status >= 0) { if (put_user(result, &ucmd->result)) @@ -235,7 +250,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, } static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, - struct nvme_passthru_cmd64 __user *ucmd) + struct nvme_passthru_cmd64 __user *ucmd, bool vec) { struct nvme_passthru_cmd64 cmd; struct nvme_command c; @@ -270,7 +285,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, nvme_to_user_ptr(cmd.addr), cmd.data_len, nvme_to_user_ptr(cmd.metadata), cmd.metadata_len, - 0, &cmd.result, timeout); + 0, &cmd.result, timeout, vec); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) @@ -296,7 +311,7 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd, case NVME_IOCTL_ADMIN_CMD: return nvme_user_cmd(ctrl, NULL, argp); case NVME_IOCTL_ADMIN64_CMD: - return nvme_user_cmd64(ctrl, NULL, argp); + return nvme_user_cmd64(ctrl, NULL, argp, false); default: return sed_ioctl(ctrl->opal_dev, cmd, argp); } @@ -340,7 +355,9 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd, case NVME_IOCTL_SUBMIT_IO: return nvme_submit_io(ns, argp); case NVME_IOCTL_IO64_CMD: - return nvme_user_cmd64(ns->ctrl, ns, argp); + return nvme_user_cmd64(ns->ctrl, ns, argp, false); + case NVME_IOCTL_IO64_CMD_VEC: + return nvme_user_cmd64(ns->ctrl, ns, argp, true); default: return -ENOTTY; } @@ -480,7 +497,7 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd, case NVME_IOCTL_ADMIN_CMD: return nvme_user_cmd(ctrl, NULL, argp); case NVME_IOCTL_ADMIN64_CMD: - return nvme_user_cmd64(ctrl, NULL, argp); + return nvme_user_cmd64(ctrl, NULL, argp, false); case NVME_IOCTL_IO_CMD: return nvme_dev_user_cmd(ctrl, argp); case NVME_IOCTL_RESET: diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index d99b5a772698..b2e43185e3b5 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -55,7 +55,10 @@ struct nvme_passthru_cmd64 { __u64 metadata; __u64 addr; __u32 metadata_len; - __u32 data_len; + union { + __u32 data_len; /* for non-vectored io */ + __u32 vec_cnt; /* for vectored io */ + }; __u32 cdw10; __u32 cdw11; __u32 cdw12; @@ -78,5 +81,6 @@ struct nvme_passthru_cmd64 { #define NVME_IOCTL_RESCAN _IO('N', 0x46) #define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) #define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) +#define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64) #endif /* _UAPI_LINUX_NVME_IOCTL_H */ -- cgit v1.2.3-71-gd317 From 0c9f178778919bb500443f340647b44227778fb2 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:42 +0800 Subject: iommu: Remove guest pasid related interfaces and definitions The guest pasid related uapi interfaces and definitions are not referenced anywhere in the tree. We've also reached a consensus to replace them with a new iommufd design. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 210 --------------------------------------------- include/linux/iommu.h | 44 ---------- include/uapi/linux/iommu.h | 181 -------------------------------------- 3 files changed, 435 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 107dcf5938d6..3cbf4781e5bd 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2031,216 +2031,6 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) return 0; } -/* - * Check flags and other user provided data for valid combinations. We also - * make sure no reserved fields or unused flags are set. This is to ensure - * not breaking userspace in the future when these fields or flags are used. 
- */ -static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) -{ - u32 mask; - int i; - - if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) - return -EINVAL; - - mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; - if (info->cache & ~mask) - return -EINVAL; - - if (info->granularity >= IOMMU_INV_GRANU_NR) - return -EINVAL; - - switch (info->granularity) { - case IOMMU_INV_GRANU_ADDR: - if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) - return -EINVAL; - - mask = IOMMU_INV_ADDR_FLAGS_PASID | - IOMMU_INV_ADDR_FLAGS_ARCHID | - IOMMU_INV_ADDR_FLAGS_LEAF; - - if (info->granu.addr_info.flags & ~mask) - return -EINVAL; - break; - case IOMMU_INV_GRANU_PASID: - mask = IOMMU_INV_PASID_FLAGS_PASID | - IOMMU_INV_PASID_FLAGS_ARCHID; - if (info->granu.pasid_info.flags & ~mask) - return -EINVAL; - - break; - case IOMMU_INV_GRANU_DOMAIN: - if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) - return -EINVAL; - break; - default: - return -EINVAL; - } - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(info->padding); i++) { - if (info->padding[i]) - return -EINVAL; - } - - return 0; -} - -int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, - void __user *uinfo) -{ - struct iommu_cache_invalidate_info inv_info = { 0 }; - u32 minsz; - int ret; - - if (unlikely(!domain->ops->cache_invalidate)) - return -ENODEV; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_cache_invalidate_info, granu); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(&inv_info, uinfo, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (inv_info.argsz < minsz) - return -EINVAL; - - /* PASID and address granu require additional info beyond minsz */ - if (inv_info.granularity == IOMMU_INV_GRANU_PASID && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) - return -EINVAL; - - if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) - return -EINVAL; - - /* - * User might be using a newer UAPI header which has a larger data - * size, we shall support the existing flags within the current - * size. Copy the remaining user data _after_ minsz but not more - * than the current kernel supported size. 
- */ - if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, - min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) - return -EFAULT; - - /* Now the argsz is validated, check the content */ - ret = iommu_check_cache_invl_data(&inv_info); - if (ret) - return ret; - - return domain->ops->cache_invalidate(domain, dev, &inv_info); -} -EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); - -static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) -{ - u64 mask; - int i; - - if (data->version != IOMMU_GPASID_BIND_VERSION_1) - return -EINVAL; - - /* Check the range of supported formats */ - if (data->format >= IOMMU_PASID_FORMAT_LAST) - return -EINVAL; - - /* Check all flags */ - mask = IOMMU_SVA_GPASID_VAL; - if (data->flags & ~mask) - return -EINVAL; - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(data->padding); i++) { - if (data->padding[i]) - return -EINVAL; - } - - return 0; -} - -static int iommu_sva_prepare_bind_data(void __user *udata, - struct iommu_gpasid_bind_data *data) -{ - u32 minsz; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_gpasid_bind_data, vendor); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(data, udata, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (data->argsz < minsz) - return -EINVAL; - /* - * User might be using a newer UAPI header, we shall let IOMMU vendor - * driver decide on what size it needs. Since the guest PASID bind data - * can be vendor specific, larger argsz could be the result of extension - * for one vendor but it should not affect another vendor. - * Copy the remaining user data _after_ minsz - */ - if (copy_from_user((void *)data + minsz, udata + minsz, - min_t(u32, data->argsz, sizeof(*data)) - minsz)) - return -EFAULT; - - return iommu_check_bind_data(data); -} - -int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return domain->ops->sva_bind_gpasid(domain, dev, &data); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); - -int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - ioasid_t pasid) -{ - if (unlikely(!domain->ops->sva_unbind_gpasid)) - return -ENODEV; - - return domain->ops->sva_unbind_gpasid(dev, pasid); -} -EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); - -int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); - static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index de0c57a567c8..cde1d0aad60f 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -229,9 +229,6 @@ struct iommu_iotlb_gather { * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response - * @cache_invalidate: invalidate 
translation caches - * @sva_bind_gpasid: bind guest pasid and mm - * @sva_unbind_gpasid: unbind guest pasid and mm * @def_domain_type: device default domain type, return value: * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain @@ -301,12 +298,6 @@ struct iommu_ops { int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); - int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info); - int (*sva_bind_gpasid)(struct iommu_domain *domain, - struct device *dev, struct iommu_gpasid_bind_data *data); - - int (*sva_unbind_gpasid)(struct device *dev, u32 pasid); int (*def_domain_type)(struct device *dev); @@ -421,14 +412,6 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); -extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - void __user *uinfo); - -extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); -extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); @@ -1051,33 +1034,6 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) return IOMMU_PASID_INVALID; } -static inline int -iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, - ioasid_t pasid) -{ - return -ENODEV; -} - static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { return NULL; diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index 59178fc229ca..65d8b0234f69 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -158,185 +158,4 @@ struct iommu_page_response { __u32 code; }; -/* defines the granularity of the invalidation */ -enum iommu_inv_granularity { - IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */ - IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */ - IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */ - IOMMU_INV_GRANU_NR, /* number of invalidation granularities */ -}; - -/** - * struct iommu_inv_addr_info - Address Selective Invalidation Structure - * - * @flags: indicates the granularity of the address-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If ARCHID bit is set, @archid is populated and the invalidation relates - * to cache entries tagged with this architecture specific ID and matching - * the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - If neither PASID or ARCHID is set, global addr invalidation applies. 
- * - The LEAF flag indicates whether only the leaf PTE caching needs to be - * invalidated and other paging structure caches can be preserved. - * @pasid: process address space ID - * @archid: architecture-specific ID - * @addr: first stage/level input address - * @granule_size: page/block size of the mapping in bytes - * @nb_granules: number of contiguous granules to be invalidated - */ -struct iommu_inv_addr_info { -#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0) -#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1) -#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2) - __u32 flags; - __u32 archid; - __u64 pasid; - __u64 addr; - __u64 granule_size; - __u64 nb_granules; -}; - -/** - * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure - * - * @flags: indicates the granularity of the PASID-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If the ARCHID bit is set, the @archid is populated and the invalidation - * relates to cache entries tagged with this architecture specific ID and - * matching the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - At least one of PASID or ARCHID must be set. - * @pasid: process address space ID - * @archid: architecture-specific ID - */ -struct iommu_inv_pasid_info { -#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0) -#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1) - __u32 flags; - __u32 archid; - __u64 pasid; -}; - -/** - * struct iommu_cache_invalidate_info - First level/stage invalidation - * information - * @argsz: User filled size of this data - * @version: API version of this structure - * @cache: bitfield that allows to select which caches to invalidate - * @granularity: defines the lowest granularity used for the invalidation: - * domain > PASID > addr - * @padding: reserved for future use (should be zero) - * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID - * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR - * - * Not all the combinations of cache/granularity are valid: - * - * +--------------+---------------+---------------+---------------+ - * | type / | DEV_IOTLB | IOTLB | PASID | - * | granularity | | | cache | - * +==============+===============+===============+===============+ - * | DOMAIN | N/A | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | PASID | Y | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | ADDR | Y | Y | N/A | - * +--------------+---------------+---------------+---------------+ - * - * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than - * @version and @cache. - * - * If multiple cache types are invalidated simultaneously, they all - * must support the used granularity. 
- */ -struct iommu_cache_invalidate_info { - __u32 argsz; -#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1 - __u32 version; -/* IOMMU paging structure cache */ -#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */ -#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */ -#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */ -#define IOMMU_CACHE_INV_TYPE_NR (3) - __u8 cache; - __u8 granularity; - __u8 padding[6]; - union { - struct iommu_inv_pasid_info pasid_info; - struct iommu_inv_addr_info addr_info; - } granu; -}; - -/** - * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest - * SVA binding. - * - * @flags: VT-d PASID table entry attributes - * @pat: Page attribute table data to compute effective memory type - * @emt: Extended memory type - * - * Only guest vIOMMU selectable and effective options are passed down to - * the host IOMMU. - */ -struct iommu_gpasid_bind_data_vtd { -#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */ -#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */ -#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */ -#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */ -#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */ -#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7) - __u64 flags; - __u32 pat; - __u32 emt; -}; - -#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \ - IOMMU_SVA_VTD_GPASID_EMTE | \ - IOMMU_SVA_VTD_GPASID_PCD | \ - IOMMU_SVA_VTD_GPASID_PWT) - -/** - * struct iommu_gpasid_bind_data - Information about device and guest PASID binding - * @argsz: User filled size of this data - * @version: Version of this data structure - * @format: PASID table entry format - * @flags: Additional information on guest bind request - * @gpgd: Guest page directory base of the guest mm to bind - * @hpasid: Process address space ID used for the guest mm in host IOMMU - * @gpasid: Process address space ID used for the guest mm in guest IOMMU - * @addr_width: Guest virtual address width - * @padding: Reserved for future use (should be zero) - * @vtd: Intel VT-d specific data - * - * Guest to host PASID mapping can be an identity or non-identity, where guest - * has its own PASID space. For non-identify mapping, guest to host PASID lookup - * is needed when VM programs guest PASID into an assigned device. VMM may - * trap such PASID programming then request host IOMMU driver to convert guest - * PASID to host PASID based on this bind data. - */ -struct iommu_gpasid_bind_data { - __u32 argsz; -#define IOMMU_GPASID_BIND_VERSION_1 1 - __u32 version; -#define IOMMU_PASID_FORMAT_INTEL_VTD 1 -#define IOMMU_PASID_FORMAT_LAST 2 - __u32 format; - __u32 addr_width; -#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */ - __u64 flags; - __u64 gpgd; - __u64 hpasid; - __u64 gpasid; - __u8 padding[8]; - /* Vendor specific data */ - union { - struct iommu_gpasid_bind_data_vtd vtd; - } vendor; -}; - #endif /* _UAPI_IOMMU_H */ -- cgit v1.2.3-71-gd317 From 7b8135f4df98b155b23754b6065c157861e268f1 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 1 Mar 2022 05:04:34 +0000 Subject: rtnetlink: add new rtm tunnel api for tunnel id filtering This patch adds new rtm tunnel msg and api for tunnel id filtering in dst_metadata devices. 
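To make the shape of the new message type concrete, a hypothetical userspace fragment that builds an RTM_GETTUNNEL dump request around the struct tunnel_msg added in the if_link.h hunk below (netlink socket setup and reply parsing omitted; this is a sketch, not code from the patch):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

struct tunnel_req {
	struct nlmsghdr nlh;
	struct tunnel_msg tmsg;
};

static void build_gettunnel(struct tunnel_req *req, int ifindex)
{
	memset(req, 0, sizeof(*req));
	req->nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tunnel_msg));
	req->nlh.nlmsg_type = RTM_GETTUNNEL;
	req->nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req->tmsg.family = AF_BRIDGE;	/* vxlan hooks in with AF_BRIDGE */
	req->tmsg.ifindex = ifindex;	/* 0 dumps across all devices */
}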
First dst_metadata device to use the api is vxlan driver with AF_BRIDGE family. This and later changes add ability in vxlan driver to do tunnel id filtering (or vni filtering) on dst_metadata devices. This is similar to vlan api in the vlan filtering bridge. this patch includes selinux nlmsg_route_perms support for RTM_*TUNNEL api from Benjamin Poirier. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/uapi/linux/if_link.h | 26 ++++++++++++++++++++++++++ include/uapi/linux/rtnetlink.h | 9 +++++++++ security/selinux/nlmsgtab.c | 5 ++++- 3 files changed, 39 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index be09d2ad4b5d..3dfc9ff2ec9b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -713,7 +713,32 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 reserved1; + __u16 reserved2; + __u32 ifindex; +}; + /* VXLAN section */ +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -745,6 +770,7 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 93d934cc4613..0970cb4b1b88 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -185,6 +185,13 @@ enum { RTM_GETNEXTHOPBUCKET, #define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET + RTM_NEWTUNNEL = 120, +#define RTM_NEWTUNNEL RTM_NEWTUNNEL + RTM_DELTUNNEL, +#define RTM_DELTUNNEL RTM_DELTUNNEL + RTM_GETTUNNEL, +#define RTM_GETTUNNEL RTM_GETTUNNEL + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; @@ -756,6 +763,8 @@ enum rtnetlink_groups { #define RTNLGRP_BRVLAN RTNLGRP_BRVLAN RTNLGRP_MCTP_IFADDR, #define RTNLGRP_MCTP_IFADDR RTNLGRP_MCTP_IFADDR + RTNLGRP_TUNNEL, +#define RTNLGRP_TUNNEL RTNLGRP_TUNNEL __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 94ea2a8b2bb7..6ad3ee02e023 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -91,6 +91,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] = { RTM_NEWNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_GETNEXTHOPBUCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_NEWTUNNEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_DELTUNNEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_GETTUNNEL, NETLINK_ROUTE_SOCKET__NLMSG_READ }, }; static const struct nlmsg_perm nlmsg_tcpdiag_perms[] = @@ -176,7 +179,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) * structures at the top of this file with the new mappings * before updating the BUILD_BUG_ON() macro! 
*/ - BUILD_BUG_ON(RTM_MAX != (RTM_NEWNEXTHOPBUCKET + 3)); + BUILD_BUG_ON(RTM_MAX != (RTM_NEWTUNNEL + 3)); err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms, sizeof(nlmsg_route_perms)); break; -- cgit v1.2.3-71-gd317 From 445b2f36bb4efb81f064e931f28b9ec19f114355 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 1 Mar 2022 05:04:39 +0000 Subject: drivers: vxlan: vnifilter: add support for stats dumping Add support for VXLAN vni filter entries' stats dumping Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- drivers/net/vxlan/vxlan_vnifilter.c | 92 ++++++++++++++++++++++++++++++++++--- include/uapi/linux/if_link.h | 25 +++++++++- 2 files changed, 110 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index 2d23312f4f62..9f28d0b6a6b2 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -116,6 +116,34 @@ void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan) spin_unlock(&vn->sock_lock); } +static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode, + struct vxlan_vni_stats *dest) +{ + int i; + + memset(dest, 0, sizeof(*dest)); + for_each_possible_cpu(i) { + struct vxlan_vni_stats_pcpu *pstats; + struct vxlan_vni_stats temp; + unsigned int start; + + pstats = per_cpu_ptr(vninode->stats, i); + do { + start = u64_stats_fetch_begin_irq(&pstats->syncp); + memcpy(&temp, &pstats->stats, sizeof(temp)); + } while (u64_stats_fetch_retry_irq(&pstats->syncp, start)); + + dest->rx_packets += temp.rx_packets; + dest->rx_bytes += temp.rx_bytes; + dest->rx_drops += temp.rx_drops; + dest->rx_errors += temp.rx_errors; + dest->tx_packets += temp.tx_packets; + dest->tx_bytes += temp.tx_bytes; + dest->tx_drops += temp.tx_drops; + dest->tx_errors += temp.tx_errors; + } +} + static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode, int type, unsigned int len) { @@ -182,9 +210,48 @@ static size_t vxlan_vnifilter_entry_nlmsg_size(void) + nla_total_size(sizeof(struct in6_addr));/* VXLAN_VNIFILTER_ENTRY_GROUP{6} */ } +static int __vnifilter_entry_fill_stats(struct sk_buff *skb, + const struct vxlan_vni_node *vbegin) +{ + struct vxlan_vni_stats vstats; + struct nlattr *vstats_attr; + + vstats_attr = nla_nest_start(skb, VXLAN_VNIFILTER_ENTRY_STATS); + if (!vstats_attr) + goto out_stats_err; + + vxlan_vnifilter_stats_get(vbegin, &vstats); + if (nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_BYTES, + vstats.rx_bytes, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_PKTS, + vstats.rx_packets, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_DROPS, + vstats.rx_drops, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_RX_ERRORS, + vstats.rx_errors, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_BYTES, + vstats.tx_bytes, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_PKTS, + vstats.tx_packets, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_DROPS, + vstats.tx_drops, VNIFILTER_ENTRY_STATS_PAD) || + nla_put_u64_64bit(skb, VNIFILTER_ENTRY_STATS_TX_ERRORS, + vstats.tx_errors, VNIFILTER_ENTRY_STATS_PAD)) + goto out_stats_err; + + nla_nest_end(skb, vstats_attr); + + return 0; + +out_stats_err: + nla_nest_cancel(skb, vstats_attr); + return -EMSGSIZE; +} + static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb, struct vxlan_vni_node *vbegin, - struct 
vxlan_vni_node *vend) + struct vxlan_vni_node *vend, + bool fill_stats) { struct nlattr *ventry; u32 vs = be32_to_cpu(vbegin->vni); @@ -217,6 +284,9 @@ static bool vxlan_fill_vni_filter_entry(struct sk_buff *skb, } } + if (fill_stats && __vnifilter_entry_fill_stats(skb, vbegin)) + goto out_err; + nla_nest_end(skb, ventry); return true; @@ -249,7 +319,7 @@ static void vxlan_vnifilter_notify(const struct vxlan_dev *vxlan, tmsg->family = AF_BRIDGE; tmsg->ifindex = vxlan->dev->ifindex; - if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode)) + if (!vxlan_fill_vni_filter_entry(skb, vninode, vninode, false)) goto out_err; nlmsg_end(skb, nlh); @@ -269,10 +339,11 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev, { struct vxlan_vni_node *tmp, *v, *vbegin = NULL, *vend = NULL; struct vxlan_dev *vxlan = netdev_priv(dev); - struct tunnel_msg *new_tmsg; + struct tunnel_msg *new_tmsg, *tmsg; int idx = 0, s_idx = cb->args[1]; struct vxlan_vni_group *vg; struct nlmsghdr *nlh; + bool dump_stats; int err = 0; if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)) @@ -283,6 +354,9 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev, if (!vg || !vg->num_vnis) return 0; + tmsg = nlmsg_data(cb->nlh); + dump_stats = !!(tmsg->flags & TUNNEL_MSG_FLAG_STATS); + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWTUNNEL, sizeof(*new_tmsg), NLM_F_MULTI); if (!nlh) @@ -302,11 +376,12 @@ static int vxlan_vnifilter_dump_dev(const struct net_device *dev, vend = v; continue; } - if (vnirange(vend, v) == 1 && + if (!dump_stats && vnirange(vend, v) == 1 && vxlan_addr_equal(&v->remote_ip, &vend->remote_ip)) { goto update_end; } else { - if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend)) { + if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, + dump_stats)) { err = -EMSGSIZE; break; } @@ -318,7 +393,7 @@ update_end: } if (!err && vbegin) { - if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend)) + if (!vxlan_fill_vni_filter_entry(skb, vbegin, vend, dump_stats)) err = -EMSGSIZE; } @@ -338,6 +413,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb tmsg = nlmsg_data(cb->nlh); + if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) { + NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header"); + return -EINVAL; + } + rcu_read_lock(); if (tmsg->ifindex) { dev = dev_get_by_index_rcu(net, tmsg->ifindex); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 3dfc9ff2ec9b..e315e53125f4 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -716,18 +716,41 @@ enum ipvlan_mode { /* Tunnel RTM header */ struct tunnel_msg { __u8 family; - __u8 reserved1; + __u8 flags; __u16 reserved2; __u32 ifindex; }; /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + enum { VXLAN_VNIFILTER_ENTRY_UNSPEC, VXLAN_VNIFILTER_ENTRY_START, VXLAN_VNIFILTER_ENTRY_END, VXLAN_VNIFILTER_ENTRY_GROUP, VXLAN_VNIFILTER_ENTRY_GROUP6, 
+ VXLAN_VNIFILTER_ENTRY_STATS, __VXLAN_VNIFILTER_ENTRY_MAX }; #define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) -- cgit v1.2.3-71-gd317 From cedd3614e5d9c80908099c19f8716714ce0610b1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 24 Feb 2022 11:06:54 +0530 Subject: perf: Add irq and exception return branch types This expands the generic branch type classification by adding two more entries there, i.e. irq and exception return. Also updates the x86 implementation to process X86_BR_IRET and X86_BR_IRQ records as appropriate. This changes the branch types reported to user space on the x86 platform, but it should not be a problem. The possible scenarios and impacts are enumerated here. Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/1645681014-3346-1-git-send-email-anshuman.khandual@arm.com --- arch/x86/events/intel/lbr.c | 4 ++-- include/uapi/linux/perf_event.h | 2 ++ tools/include/uapi/linux/perf_event.h | 2 ++ tools/perf/util/branch.c | 4 +++- 4 files changed, 9 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 669c2be14784..fe1742c4ca49 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1329,10 +1329,10 @@ static int branch_map[X86_BR_TYPE_MAP_MAX] = { PERF_BR_SYSCALL, /* X86_BR_SYSCALL */ PERF_BR_SYSRET, /* X86_BR_SYSRET */ PERF_BR_UNKNOWN, /* X86_BR_INT */ - PERF_BR_UNKNOWN, /* X86_BR_IRET */ + PERF_BR_ERET, /* X86_BR_IRET */ PERF_BR_COND, /* X86_BR_JCC */ PERF_BR_UNCOND, /* X86_BR_JMP */ - PERF_BR_UNKNOWN, /* X86_BR_IRQ */ + PERF_BR_IRQ, /* X86_BR_IRQ */ PERF_BR_IND_CALL, /* X86_BR_IND_CALL */ PERF_BR_UNKNOWN, /* X86_BR_ABORT */ PERF_BR_UNKNOWN, /* X86_BR_IN_TX */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 1b65042ab1db..7dc71768749d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -251,6 +251,8 @@ enum { PERF_BR_SYSRET = 8, /* syscall return */ PERF_BR_COND_CALL = 9, /* conditional function call */ PERF_BR_COND_RET = 10, /* conditional function return */ + PERF_BR_ERET = 11, /* exception return */ + PERF_BR_IRQ = 12, /* irq */ PERF_BR_MAX, }; diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 4cd39aaccbe7..d1324c427e8b 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -251,6 +251,8 @@ enum { PERF_BR_SYSRET = 8, /* syscall return */ PERF_BR_COND_CALL = 9, /* conditional function call */ PERF_BR_COND_RET = 10, /* conditional function return */ + PERF_BR_ERET = 11, /* exception return */ + PERF_BR_IRQ = 12, /* irq */ PERF_BR_MAX, }; diff --git a/tools/perf/util/branch.c b/tools/perf/util/branch.c index 2285b1eb3128..a9a909db8cc7 100644 --- a/tools/perf/util/branch.c +++ b/tools/perf/util/branch.c @@ -49,7 +49,9 @@ const char *branch_type_name(int type) "SYSCALL", "SYSRET", "COND_CALL", - "COND_RET" + "COND_RET", + "ERET", + "IRQ" }; if (type >= 0 && type < PERF_BR_MAX) -- cgit v1.2.3-71-gd317 From 4f0bfdfd8323e5b461fc1042143a1097dba9fced Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 23 Feb 2022 19:34:31 -0800 Subject: ELF: Properly redefine PT_GNU_* in terms of PT_LOOS The PT_GNU_* program header types are actually offsets from PT_LOOS, so redefine them as such, reorder them, and add the missing PT_GNU_RELRO.
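A quick arithmetic check of the rewrite (editorial sketch, not part of the patch): PT_LOOS is 0x60000000, so the offset form preserves every historical value, e.g. 0x60000000 + 0x474e550 = 0x6474e550, the old PT_GNU_EH_FRAME literal. As a compile-time check:

/* Sanity check that the PT_LOOS-relative definitions in the diff below
 * keep the historical constants; 0x60000000 is the standard PT_LOOS value. */
_Static_assert(0x60000000 + 0x474e550 == 0x6474e550,
	       "PT_GNU_EH_FRAME value unchanged");
_Static_assert(0x60000000 + 0x474e553 == 0x6474e553,
	       "PT_GNU_PROPERTY value unchanged");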
Cc: Eric Biederman Cc: Peter Collingbourne Cc: Catalin Marinas Cc: Dave Martin Signed-off-by: Kees Cook --- include/uapi/linux/elf.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 61bf4774b8f2..6438d55529bf 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -35,10 +35,11 @@ typedef __s64 Elf64_Sxword; #define PT_HIOS 0x6fffffff /* OS-specific */ #define PT_LOPROC 0x70000000 #define PT_HIPROC 0x7fffffff -#define PT_GNU_EH_FRAME 0x6474e550 -#define PT_GNU_PROPERTY 0x6474e553 - +#define PT_GNU_EH_FRAME (PT_LOOS + 0x474e550) #define PT_GNU_STACK (PT_LOOS + 0x474e551) +#define PT_GNU_RELRO (PT_LOOS + 0x474e552) +#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553) + /* * Extended Numbering -- cgit v1.2.3-71-gd317 From dd0ca255f3d27a1bb43d8e9529fb3645f9a341a3 Mon Sep 17 00:00:00 2001 From: Daniel Braunwarth Date: Mon, 28 Feb 2022 14:30:28 +0100 Subject: if_ether.h: add PROFINET Ethertype Add the Ethertype for the PROFINET protocol. Signed-off-by: Daniel Braunwarth Signed-off-by: Jakub Kicinski --- include/uapi/linux/if_ether.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index c0c2f3ed5729..4f4ed35a16db 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -86,6 +86,7 @@ * over Ethernet */ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#define ETH_P_PROFINET 0x8892 /* PROFINET */ #define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ -- cgit v1.2.3-71-gd317 From cd73cda742fbe1f33ed7306c7a01aa64f4e6ebd5 Mon Sep 17 00:00:00 2001 From: Daniel Braunwarth Date: Mon, 28 Feb 2022 14:30:29 +0100 Subject: if_ether.h: add EtherCAT Ethertype Add the Ethertype for the EtherCAT protocol. Signed-off-by: Daniel Braunwarth Signed-off-by: Jakub Kicinski --- include/uapi/linux/if_ether.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 4f4ed35a16db..1d0bccc3fa54 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -89,6 +89,7 @@ #define ETH_P_PROFINET 0x8892 /* PROFINET */ #define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ +#define ETH_P_ETHERCAT 0x88A4 /* EtherCAT */ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ #define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */ -- cgit v1.2.3-71-gd317 From d58b8a99cbb84c1eb3b3613d23c1a328695a9455 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 25 Feb 2022 12:33:51 -0500 Subject: drm/amdkfd: Add SMI add event helper This removes duplicate code, unifies the event message format, and simplifies adding new events in the following patches. Use KFD_SMI_EVENT_MSG_SIZE to define the msg size; the same size will be used in user space to allocate the msg receive buffer.
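For context, a minimal sketch of the userspace side (not part of the patch, and assuming the SMI file descriptor was obtained through the AMDKFD_IOC_SMI_EVENTS ioctl on /dev/kfd):

#include <unistd.h>
#include <linux/kfd_ioctl.h>

/* Read one SMI event into a buffer sized with the new constant.
 * Events are text lines of the form "<event id in hex> <payload>\n".
 * The caller must pass a buffer of at least KFD_SMI_EVENT_MSG_SIZE bytes. */
static ssize_t read_one_smi_event(int smi_fd, char *msg)
{
	return read(smi_fd, msg, KFD_SMI_EVENT_MSG_SIZE);
}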
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 68 ++++++++++------------------- include/uapi/linux/kfd_ioctl.h | 1 + 2 files changed, 23 insertions(+), 46 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 1321fe91a1cf..bba8d9692a4c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -176,22 +176,28 @@ static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event, rcu_read_unlock(); } -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +static void kfd_smi_event_add(struct kfd_dev *dev, unsigned int event, + char *fmt, ...) { - /* - * GpuReset msg = Reset seq number (incremented for - * every reset message sent before GPU reset). - * 1 byte event + 1 byte space + 8 bytes seq num + - * 1 byte \n + 1 byte \0 = 12 - */ - char fifo_in[12]; + char fifo_in[KFD_SMI_EVENT_MSG_SIZE]; int len; - unsigned int event; + va_list args; if (list_empty(&dev->smi_clients)) return; - memset(fifo_in, 0x0, sizeof(fifo_in)); + len = snprintf(fifo_in, sizeof(fifo_in), "%x ", event); + + va_start(args, fmt); + len += vsnprintf(fifo_in + len, sizeof(fifo_in) - len, fmt, args); + va_end(args); + + add_event_to_kfifo(dev, event, fifo_in, len); +} + +void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +{ + unsigned int event; if (post_reset) { event = KFD_SMI_EVENT_GPU_POST_RESET; @@ -199,48 +205,20 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) event = KFD_SMI_EVENT_GPU_PRE_RESET; ++(dev->reset_seq_num); } - - len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event, - dev->reset_seq_num); - - add_event_to_kfifo(dev, event, fifo_in, len); + kfd_smi_event_add(dev, event, "%x\n", dev->reset_seq_num); } void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, uint64_t throttle_bitmask) { - /* - * ThermalThrottle msg = throttle_bitmask(8): - * thermal_interrupt_count(16): - * 1 byte event + 1 byte space + 16 byte throttle_bitmask + - * 1 byte : + 16 byte thermal_interupt_counter + 1 byte \n + - * 1 byte \0 = 37 - */ - char fifo_in[37]; - int len; - - if (list_empty(&dev->smi_clients)) - return; - - len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n", - KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, - amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); - - add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); + kfd_smi_event_add(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n", + throttle_bitmask, + amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); } void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) { struct amdgpu_task_info task_info; - /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */ - /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n + - * 1 byte \0 = 29 - */ - char fifo_in[29]; - int len; - - if (list_empty(&dev->smi_clients)) - return; memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(dev->adev, pasid, &task_info); @@ -248,10 +226,8 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) if (!task_info.pid) return; - len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT, - task_info.pid, task_info.task_name); - - add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len); + kfd_smi_event_add(dev, KFD_SMI_EVENT_VMFAULT, 
"%x:%s\n", + task_info.pid, task_info.task_name); } int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index baec5a41de3e..b40687bf1014 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -463,6 +463,7 @@ enum kfd_smi_event { }; #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1)) +#define KFD_SMI_EVENT_MSG_SIZE 96 struct kfd_ioctl_smi_events_args { __u32 gpuid; /* to KFD */ -- cgit v1.2.3-71-gd317 From 46efc97b73060823fdc18103a5e317a8327d44e1 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 Mar 2022 18:31:17 +0200 Subject: net: rtnetlink: RTM_GETSTATS: Allow filtering inside nests The filter_mask field of RTM_GETSTATS header determines which top-level attributes should be included in the netlink response. This saves processing time by only including the bits that the user cares about instead of always dumping everything. This is doubly important for HW-backed statistics that would typically require a trip to the device to fetch the stats. So far there was only one HW-backed stat suite per attribute. However, IFLA_STATS_LINK_OFFLOAD_XSTATS is a nest, and will gain a new stat suite in the following patches. It would therefore be advantageous to be able to filter within that nest, and select just one or the other HW-backed statistics suite. Extend rtnetlink so that RTM_GETSTATS permits attributes in the payload. The scheme is as follows: - RTM_GETSTATS - struct if_stats_msg - attr nest IFLA_STATS_GET_FILTERS - attr IFLA_STATS_LINK_OFFLOAD_XSTATS - u32 filter_mask This scheme reuses the existing enumerators by nesting them in a dedicated context attribute. This is covered by policies as usual, therefore a gradual opt-in is possible. Currently only IFLA_STATS_LINK_OFFLOAD_XSTATS nest has filtering enabled, because for the SW counters the issue does not seem to be that important. rtnl_offload_xstats_get_size() and _fill() are extended to observe the requested filters. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/uapi/linux/if_link.h | 10 +++ net/core/rtnetlink.c | 141 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 128 insertions(+), 23 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index e315e53125f4..4d62ea6e1288 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1207,6 +1207,16 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. 
+ */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ad858799fd93..31aa26062070 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5092,13 +5092,15 @@ rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, } static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, - int *prividx) + int *prividx, u32 off_filter_mask) { int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; bool have_data = false; int err; - if (*prividx <= attr_id_cpu_hit) { + if (*prividx <= attr_id_cpu_hit && + (off_filter_mask & + IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) { err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb); if (!err) { have_data = true; @@ -5115,14 +5117,18 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, return 0; } -static int rtnl_offload_xstats_get_size(const struct net_device *dev) +static int rtnl_offload_xstats_get_size(const struct net_device *dev, + u32 off_filter_mask) { int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; int nla_size = 0; int size; - size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); - nla_size += nla_total_size_64bit(size); + if (off_filter_mask & + IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) { + size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit); + nla_size += nla_total_size_64bit(size); + } if (nla_size != 0) nla_size += nla_total_size(0); @@ -5130,11 +5136,20 @@ static int rtnl_offload_xstats_get_size(const struct net_device *dev) return nla_size; } +struct rtnl_stats_dump_filters { + /* mask[0] filters outer attributes. Then individual nests have their + * filtering mask at the index of the nested attribute. 
+ */ + u32 mask[IFLA_STATS_MAX + 1]; +}; + static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, - unsigned int flags, unsigned int filter_mask, + unsigned int flags, + const struct rtnl_stats_dump_filters *filters, int *idxattr, int *prividx) { + unsigned int filter_mask = filters->mask[0]; struct if_stats_msg *ifsm; struct nlmsghdr *nlh; struct nlattr *attr; @@ -5210,13 +5225,17 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, *idxattr)) { + u32 off_filter_mask; + + off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS; attr = nla_nest_start_noflag(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS); if (!attr) goto nla_put_failure; - err = rtnl_offload_xstats_fill(skb, dev, prividx); + err = rtnl_offload_xstats_fill(skb, dev, prividx, + off_filter_mask); if (err == -ENODATA) nla_nest_cancel(skb, attr); else @@ -5281,9 +5300,10 @@ nla_put_failure: } static size_t if_nlmsg_stats_size(const struct net_device *dev, - u32 filter_mask) + const struct rtnl_stats_dump_filters *filters) { size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); + unsigned int filter_mask = filters->mask[0]; if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); @@ -5319,8 +5339,12 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, } } - if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) - size += rtnl_offload_xstats_get_size(dev); + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) { + u32 off_filter_mask; + + off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS]; + size += rtnl_offload_xstats_get_size(dev, off_filter_mask); + } if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) { struct rtnl_af_ops *af_ops; @@ -5344,6 +5368,74 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, return size; } +#define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1) + +static const struct nla_policy +rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = { + [IFLA_STATS_LINK_OFFLOAD_XSTATS] = + NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID), +}; + +static const struct nla_policy +rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { + [IFLA_STATS_GET_FILTERS] = + NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), +}; + +static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, + struct rtnl_stats_dump_filters *filters, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_STATS_MAX + 1]; + int err; + int at; + + err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters, + rtnl_stats_get_policy_filters, extack); + if (err < 0) + return err; + + for (at = 1; at <= IFLA_STATS_MAX; at++) { + if (tb[at]) { + if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) { + NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask"); + return -EINVAL; + } + filters->mask[at] = nla_get_u32(tb[at]); + } + } + + return 0; +} + +static int rtnl_stats_get_parse(const struct nlmsghdr *nlh, + u32 filter_mask, + struct rtnl_stats_dump_filters *filters, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; + int err; + int i; + + filters->mask[0] = filter_mask; + for (i = 1; i < ARRAY_SIZE(filters->mask); i++) + filters->mask[i] = -1U; + + err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb, + IFLA_STATS_GETSET_MAX, 
rtnl_stats_get_policy, extack); + if (err < 0) + return err; + + if (tb[IFLA_STATS_GET_FILTERS]) { + err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS], + filters, extack); + if (err) + return err; + } + + return 0; +} + static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, bool is_dump, struct netlink_ext_ack *extack) { @@ -5366,10 +5458,6 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request"); return -EINVAL; } - if (nlmsg_attrlen(nlh, sizeof(*ifsm))) { - NL_SET_ERR_MSG(extack, "Invalid attributes after stats header"); - return -EINVAL; - } if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) { NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask"); return -EINVAL; @@ -5381,12 +5469,12 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check, static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct rtnl_stats_dump_filters filters; struct net *net = sock_net(skb->sk); struct net_device *dev = NULL; int idxattr = 0, prividx = 0; struct if_stats_msg *ifsm; struct sk_buff *nskb; - u32 filter_mask; int err; err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), @@ -5403,19 +5491,22 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh, if (!dev) return -ENODEV; - filter_mask = ifsm->filter_mask; - if (!filter_mask) { + if (!ifsm->filter_mask) { NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get"); return -EINVAL; } - nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL); + err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack); + if (err) + return err; + + nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL); if (!nskb) return -ENOBUFS; err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, - 0, filter_mask, &idxattr, &prividx); + 0, &filters, &idxattr, &prividx); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ WARN_ON(err == -EMSGSIZE); @@ -5431,12 +5522,12 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct netlink_ext_ack *extack = cb->extack; int h, s_h, err, s_idx, s_idxattr, s_prividx; + struct rtnl_stats_dump_filters filters; struct net *net = sock_net(skb->sk); unsigned int flags = NLM_F_MULTI; struct if_stats_msg *ifsm; struct hlist_head *head; struct net_device *dev; - u32 filter_mask = 0; int idx = 0; s_h = cb->args[0]; @@ -5451,12 +5542,16 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) return err; ifsm = nlmsg_data(cb->nlh); - filter_mask = ifsm->filter_mask; - if (!filter_mask) { + if (!ifsm->filter_mask) { NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump"); return -EINVAL; } + err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters, + extack); + if (err) + return err; + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &net->dev_index_head[h]; @@ -5466,7 +5561,7 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb) err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, - flags, filter_mask, + flags, &filters, &s_idxattr, &s_prividx); /* If we ran out of room on the first message, * we're in trouble -- cgit v1.2.3-71-gd317 From 9309f97aef6d8250bb484dabeac925c3a7c57716 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 
Mar 2022 18:31:20 +0200 Subject: net: dev: Add hardware stats support Offloading switch device drivers may be able to collect statistics of the traffic taking place in the HW datapath that pertains to a certain soft netdevice, such as VLAN. Add the necessary infrastructure to allow exposing these statistics to the offloaded netdevice in question. The API was shaped by the following considerations: - Collection of HW statistics is not free: there may be a finite number of counters, and the act of counting may have a performance impact. It is therefore necessary to allow toggling whether HW counting should be done for any particular SW netdevice. - As the drivers are loaded and removed, a particular device may get offloaded and unoffloaded again. At the same time, the statistics values need to stay monotonic (modulo the eventual 64-bit wraparound), increasing only to reflect traffic measured in the device. To that end, the netdevice keeps around a lazily-allocated copy of struct rtnl_link_stats64. Device drivers then contribute to the values kept therein at various points. Even as the driver goes away, the struct stays around to maintain the statistics values. - Different HW devices may be able to count different things. The motivation behind this patch in particular is exposure of HW counters on Nvidia Spectrum switches, where the only practical approach to counting traffic on offloaded soft netdevices currently is to use router interface counters, and count L3 traffic. Correspondingly that is the statistics suite added in this patch. Other devices may be able to measure different kinds of traffic, and for that reason, the APIs are built to allow uniform access to different statistics suites. - Because soft netdevices and offloading drivers are only loosely bound, a netdevice uses a notifier chain to communicate with the drivers. Several new notifiers, NETDEV_OFFLOAD_XSTATS_*, have been added to carry messages to the offloading drivers. - Devices can have various conditions for when a particular counter is available. As the device is configured and reconfigured, the device offload may become or cease being suitable for counter binding. A netdevice can use a notifier type NETDEV_OFFLOAD_XSTATS_REPORT_USED to ping offloading drivers and determine whether anyone currently implements a given statistics suite. This information can then be propagated to user space. When the driver decides to unoffload a netdevice, it can use a newly-added function, netdev_offload_xstats_report_delta(), to record outstanding collected statistics, before destroying the HW counter. This patch adds a helper, call_netdevice_notifiers_info_robust(), for dispatching a notifier with the possibility of unwind when one of the consumers bails. Given the wish to eventually get rid of the global notifier block altogether, this helper only invokes the per-netns notifier block. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/netdevice.h | 42 +++++++ include/uapi/linux/if_link.h | 15 +++ net/core/dev.c | 267 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 323 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c79ee2296296..19a27ac361ef 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1950,6 +1950,7 @@ enum netdev_ml_priv_type { * @watchdog_dev_tracker: refcount tracker used by watchdog. 
* @dev_registered_tracker: tracker for reference held while * registered + * @offload_xstats_l3: L3 HW stats for this netdevice. * * FIXME: cleanup struct net_device such that network protocol info * moves out. @@ -2287,6 +2288,7 @@ struct net_device { netdevice_tracker linkwatch_dev_tracker; netdevice_tracker watchdog_dev_tracker; netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2727,6 +2729,10 @@ enum netdev_cmd { NETDEV_CVLAN_FILTER_DROP_INFO, NETDEV_SVLAN_FILTER_PUSH_INFO, NETDEV_SVLAN_FILTER_DROP_INFO, + NETDEV_OFFLOAD_XSTATS_ENABLE, + NETDEV_OFFLOAD_XSTATS_DISABLE, + NETDEV_OFFLOAD_XSTATS_REPORT_USED, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, }; const char *netdev_cmd_to_name(enum netdev_cmd cmd); @@ -2777,6 +2783,42 @@ struct netdev_notifier_pre_changeaddr_info { const unsigned char *dev_addr; }; +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; /* must be first */ + enum netdev_offload_xstats_type type; + + union { + /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ + struct netdev_notifier_offload_xstats_rd *report_delta; + /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +int netdev_offload_xstats_enable(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct netlink_ext_ack *extack); +int netdev_offload_xstats_disable(struct net_device *dev, + enum netdev_offload_xstats_type type); +bool netdev_offload_xstats_enabled(const struct net_device *dev, + enum netdev_offload_xstats_type type); +int netdev_offload_xstats_get(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *stats, bool *used, + struct netlink_ext_ack *extack); +void +netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, + const struct rtnl_hw_stats64 *stats); +void +netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); +void netdev_offload_xstats_push_delta(struct net_device *dev, + enum netdev_offload_xstats_type type, + const struct rtnl_hw_stats64 *stats); + static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 4d62ea6e1288..ef6a62a2e15d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -245,6 +245,21 @@ struct rtnl_link_stats64 { __u64 rx_nohandler; }; +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; diff --git a/net/core/dev.c b/net/core/dev.c index 2d6771075720..c9e54e5ad48d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1622,7 +1622,8 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) - N(PRE_CHANGEADDR) + N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) + N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) } #undef N return "UNKNOWN_NETDEV_EVENT"; @@ -1939,6 +1940,32 @@ static int call_netdevice_notifiers_info(unsigned long val, return raw_notifier_call_chain(&netdev_chain, val, info); } +/** + * call_netdevice_notifiers_info_robust - call per-netns notifier blocks + * for and rollback on error + * @val_up: value passed unmodified to notifier function + * @val_down: value passed unmodified to the notifier function when + * recovering from an error on @val_up + * @info: notifier information data + * + * Call all per-netns network notifier blocks, but not notifier blocks on + * the global notifier chain. Parameters and return value are as for + * raw_notifier_call_chain_robust(). + */ + +static int +call_netdevice_notifiers_info_robust(unsigned long val_up, + unsigned long val_down, + struct netdev_notifier_info *info) +{ + struct net *net = dev_net(info->dev); + + ASSERT_RTNL(); + + return raw_notifier_call_chain_robust(&net->netdev_chain, + val_up, val_down, info); +} + static int call_netdevice_notifiers_extack(unsigned long val, struct net_device *dev, struct netlink_ext_ack *extack) @@ -7728,6 +7755,242 @@ void netdev_bonding_info_change(struct net_device *dev, } EXPORT_SYMBOL(netdev_bonding_info_change); +static int netdev_offload_xstats_enable_l3(struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .info.extack = extack, + .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, + }; + int err; + int rc; + + dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3), + GFP_KERNEL); + if (!dev->offload_xstats_l3) + return -ENOMEM; + + rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE, + NETDEV_OFFLOAD_XSTATS_DISABLE, + &info.info); + err = notifier_to_errno(rc); + if (err) + goto free_stats; + + return 0; + +free_stats: + kfree(dev->offload_xstats_l3); + dev->offload_xstats_l3 = NULL; + return err; +} + +int netdev_offload_xstats_enable(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct netlink_ext_ack *extack) +{ + ASSERT_RTNL(); + + if (netdev_offload_xstats_enabled(dev, type)) + return -EALREADY; + + switch (type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + return netdev_offload_xstats_enable_l3(dev, extack); + } + + WARN_ON(1); + return -EINVAL; +} +EXPORT_SYMBOL(netdev_offload_xstats_enable); + +static void netdev_offload_xstats_disable_l3(struct net_device *dev) +{ + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3, + }; + + call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE, + &info.info); + kfree(dev->offload_xstats_l3); + dev->offload_xstats_l3 = NULL; +} + +int netdev_offload_xstats_disable(struct 
net_device *dev, + enum netdev_offload_xstats_type type) +{ + ASSERT_RTNL(); + + if (!netdev_offload_xstats_enabled(dev, type)) + return -EALREADY; + + switch (type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + netdev_offload_xstats_disable_l3(dev); + return 0; + } + + WARN_ON(1); + return -EINVAL; +} +EXPORT_SYMBOL(netdev_offload_xstats_disable); + +static void netdev_offload_xstats_disable_all(struct net_device *dev) +{ + netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3); +} + +static struct rtnl_hw_stats64 * +netdev_offload_xstats_get_ptr(const struct net_device *dev, + enum netdev_offload_xstats_type type) +{ + switch (type) { + case NETDEV_OFFLOAD_XSTATS_TYPE_L3: + return dev->offload_xstats_l3; + } + + WARN_ON(1); + return NULL; +} + +bool netdev_offload_xstats_enabled(const struct net_device *dev, + enum netdev_offload_xstats_type type) +{ + ASSERT_RTNL(); + + return netdev_offload_xstats_get_ptr(dev, type); +} +EXPORT_SYMBOL(netdev_offload_xstats_enabled); + +struct netdev_notifier_offload_xstats_ru { + bool used; +}; + +struct netdev_notifier_offload_xstats_rd { + struct rtnl_hw_stats64 stats; + bool used; +}; + +static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest, + const struct rtnl_hw_stats64 *src) +{ + dest->rx_packets += src->rx_packets; + dest->tx_packets += src->tx_packets; + dest->rx_bytes += src->rx_bytes; + dest->tx_bytes += src->tx_bytes; + dest->rx_errors += src->rx_errors; + dest->tx_errors += src->tx_errors; + dest->rx_dropped += src->rx_dropped; + dest->tx_dropped += src->tx_dropped; + dest->multicast += src->multicast; +} + +static int netdev_offload_xstats_get_used(struct net_device *dev, + enum netdev_offload_xstats_type type, + bool *p_used, + struct netlink_ext_ack *extack) +{ + struct netdev_notifier_offload_xstats_ru report_used = {}; + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .info.extack = extack, + .type = type, + .report_used = &report_used, + }; + int rc; + + WARN_ON(!netdev_offload_xstats_enabled(dev, type)); + rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED, + &info.info); + *p_used = report_used.used; + return notifier_to_errno(rc); +} + +static int netdev_offload_xstats_get_stats(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *p_stats, + bool *p_used, + struct netlink_ext_ack *extack) +{ + struct netdev_notifier_offload_xstats_rd report_delta = {}; + struct netdev_notifier_offload_xstats_info info = { + .info.dev = dev, + .info.extack = extack, + .type = type, + .report_delta = &report_delta, + }; + struct rtnl_hw_stats64 *stats; + int rc; + + stats = netdev_offload_xstats_get_ptr(dev, type); + if (WARN_ON(!stats)) + return -EINVAL; + + rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, + &info.info); + + /* Cache whatever we got, even if there was an error, otherwise the + * successful stats retrievals would get lost. 
+ */ + netdev_hw_stats64_add(stats, &report_delta.stats); + + if (p_stats) + *p_stats = *stats; + *p_used = report_delta.used; + + return notifier_to_errno(rc); +} + +int netdev_offload_xstats_get(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *p_stats, bool *p_used, + struct netlink_ext_ack *extack) +{ + ASSERT_RTNL(); + + if (p_stats) + return netdev_offload_xstats_get_stats(dev, type, p_stats, + p_used, extack); + else + return netdev_offload_xstats_get_used(dev, type, p_used, + extack); +} +EXPORT_SYMBOL(netdev_offload_xstats_get); + +void +netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta, + const struct rtnl_hw_stats64 *stats) +{ + report_delta->used = true; + netdev_hw_stats64_add(&report_delta->stats, stats); +} +EXPORT_SYMBOL(netdev_offload_xstats_report_delta); + +void +netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used) +{ + report_used->used = true; +} +EXPORT_SYMBOL(netdev_offload_xstats_report_used); + +void netdev_offload_xstats_push_delta(struct net_device *dev, + enum netdev_offload_xstats_type type, + const struct rtnl_hw_stats64 *p_stats) +{ + struct rtnl_hw_stats64 *stats; + + ASSERT_RTNL(); + + stats = netdev_offload_xstats_get_ptr(dev, type); + if (WARN_ON(!stats)) + return; + + netdev_hw_stats64_add(stats, p_stats); +} +EXPORT_SYMBOL(netdev_offload_xstats_push_delta); + /** * netdev_get_xmit_slave - Get the xmit slave of master device * @dev: device @@ -10417,6 +10680,8 @@ void unregister_netdevice_many(struct list_head *head) dev_xdp_uninstall(dev); + netdev_offload_xstats_disable_all(dev); + /* Notify protocols, that we are about to destroy * this device. They should clean all the things. */ -- cgit v1.2.3-71-gd317 From 0e7788fd76222dba3229eada9162efab185923fc Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 Mar 2022 18:31:21 +0200 Subject: net: rtnetlink: Add UAPI for obtaining L3 offload xstats Add a new IFLA_STATS_LINK_OFFLOAD_XSTATS child attribute, IFLA_OFFLOAD_XSTATS_L3_STATS, to carry statistics for traffic that takes place in a HW router. The offloaded HW stats are designed to allow per-netdevice enablement and disablement. Additionally, as a netdevice is configured, it may become or cease being suitable for binding of a HW counter. Both of these aspects need to be communicated to the userspace. To that end, add another child attribute, IFLA_OFFLOAD_XSTATS_HW_S_INFO:

- attr nest IFLA_OFFLOAD_XSTATS_HW_S_INFO
    - attr nest IFLA_OFFLOAD_XSTATS_L3_STATS
        - attr IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST
            - {0,1} as u8
        - attr IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED
            - {0,1} as u8

Thus this one attribute is a nest that can be used to carry information about various types of HW statistics, and indexing is very simply done by wrapping the information for a given statistics suite into the attribute that carries the suite in the RTM_GETSTATS query. At the same time, because _HW_S_INFO is nested directly below IFLA_STATS_LINK_OFFLOAD_XSTATS, it is possible through filtering to request only the metadata about individual statistics suites, without having to hit the HW to get the actual counters. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S.
Miller --- include/uapi/linux/if_link.h | 11 +++ net/core/rtnetlink.c | 170 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ef6a62a2e15d..b1031f481d2f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1249,10 +1249,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4db1d6c01a7d..9ce894a9454c 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5091,10 +5091,110 @@ rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id, return 0; } +static unsigned int +rtnl_offload_xstats_get_size_stats(const struct net_device *dev, + enum netdev_offload_xstats_type type) +{ + bool enabled = netdev_offload_xstats_enabled(dev, type); + + return enabled ? sizeof(struct rtnl_hw_stats64) : 0; +} + +struct rtnl_offload_xstats_request_used { + bool request; + bool used; +}; + +static int +rtnl_offload_xstats_get_stats(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_offload_xstats_request_used *ru, + struct rtnl_hw_stats64 *stats, + struct netlink_ext_ack *extack) +{ + bool request; + bool used; + int err; + + request = netdev_offload_xstats_enabled(dev, type); + if (!request) { + used = false; + goto out; + } + + err = netdev_offload_xstats_get(dev, type, stats, &used, extack); + if (err) + return err; + +out: + if (ru) { + ru->request = request; + ru->used = used; + } + return 0; +} + +static int +rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id, + struct rtnl_offload_xstats_request_used *ru) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, attr_id); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request)) + goto nla_put_failure; + + if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int +rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev, + struct netlink_ext_ack *extack) +{ + enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; + struct rtnl_offload_xstats_request_used ru_l3; + struct nlattr *nest; + int err; + + err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack); + if (err) + return err; + + nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO); + if (!nest) + return -EMSGSIZE; + + if (rtnl_offload_xstats_fill_hw_s_info_one(skb, + IFLA_OFFLOAD_XSTATS_L3_STATS, + &ru_l3)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, int *prividx, u32 
off_filter_mask, struct netlink_ext_ack *extack) { + enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; + int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO; + int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS; int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; bool have_data = false; int err; @@ -5111,6 +5211,40 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, } } + if (*prividx <= attr_id_hw_s_info && + (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) { + *prividx = attr_id_hw_s_info; + + err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack); + if (err) + return err; + + have_data = true; + *prividx = 0; + } + + if (*prividx <= attr_id_l3_stats && + (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) { + unsigned int size_l3; + struct nlattr *attr; + + *prividx = attr_id_l3_stats; + + size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3); + attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3, + IFLA_OFFLOAD_XSTATS_UNSPEC); + if (!attr) + return -EMSGSIZE; + + err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL, + nla_data(attr), extack); + if (err) + return err; + + have_data = true; + *prividx = 0; + } + if (!have_data) return -ENODATA; @@ -5118,9 +5252,35 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev, return 0; } +static unsigned int +rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev, + enum netdev_offload_xstats_type type) +{ + bool enabled = netdev_offload_xstats_enabled(dev, type); + + return nla_total_size(0) + + /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */ + nla_total_size(sizeof(u8)) + + /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */ + (enabled ? nla_total_size(sizeof(u8)) : 0) + + 0; +} + +static unsigned int +rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev) +{ + enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; + + return nla_total_size(0) + + /* IFLA_OFFLOAD_XSTATS_L3_STATS */ + rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) + + 0; +} + static int rtnl_offload_xstats_get_size(const struct net_device *dev, u32 off_filter_mask) { + enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT; int nla_size = 0; int size; @@ -5131,6 +5291,16 @@ static int rtnl_offload_xstats_get_size(const struct net_device *dev, nla_size += nla_total_size_64bit(size); } + if (off_filter_mask & + IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO)) + nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev); + + if (off_filter_mask & + IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) { + size = rtnl_offload_xstats_get_size_stats(dev, t_l3); + nla_size += nla_total_size_64bit(size); + } + if (nla_size != 0) nla_size += nla_total_size(0); -- cgit v1.2.3-71-gd317 From 03ba35667091337d8e632cf4b814f1c1b914609b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 Mar 2022 18:31:22 +0200 Subject: net: rtnetlink: Add RTM_SETSTATS The offloaded HW stats are designed to allow per-netdevice enablement and disablement. These stats are only accessible through RTM_GETSTATS, and therefore should be toggled by a RTM_SETSTATS message. Add it, and the necessary skeleton handler. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/uapi/linux/rtnetlink.h | 2 ++ net/core/rtnetlink.c | 66 ++++++++++++++++++++++++++++++++++++++++++ security/selinux/nlmsgtab.c | 1 + 3 files changed, 69 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 0970cb4b1b88..14462dc159fd 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -146,6 +146,8 @@ enum { #define RTM_NEWSTATS RTM_NEWSTATS RTM_GETSTATS = 94, #define RTM_GETSTATS RTM_GETSTATS + RTM_SETSTATS, +#define RTM_SETSTATS RTM_SETSTATS RTM_NEWCACHEREPORT = 96, #define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9ce894a9454c..d09354514355 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5564,6 +5564,10 @@ rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { NLA_POLICY_NESTED(rtnl_stats_get_policy_filters), }; +static const struct nla_policy +ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { +}; + static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, struct rtnl_stats_dump_filters *filters, struct netlink_ext_ack *extack) @@ -5769,6 +5773,67 @@ out: return skb->len; } +static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct rtnl_stats_dump_filters response_filters = {}; + struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; + struct net *net = sock_net(skb->sk); + struct net_device *dev = NULL; + int idxattr = 0, prividx = 0; + struct if_stats_msg *ifsm; + struct sk_buff *nskb; + int err; + + err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), + false, extack); + if (err) + return err; + + ifsm = nlmsg_data(nlh); + if (ifsm->family != AF_UNSPEC) { + NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC"); + return -EINVAL; + } + + if (ifsm->ifindex > 0) + dev = __dev_get_by_index(net, ifsm->ifindex); + else + return -EINVAL; + + if (!dev) + return -ENODEV; + + if (ifsm->filter_mask) { + NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set"); + return -EINVAL; + } + + err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX, + ifla_stats_set_policy, extack); + if (err < 0) + return err; + + nskb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), + GFP_KERNEL); + if (!nskb) + return -ENOBUFS; + + err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, + NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, + 0, &response_filters, &idxattr, &prividx, + extack); + if (err < 0) { + /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(nskb); + } else { + err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); + } + + return err; +} + /* Process one rtnetlink message. 
*/ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -5994,4 +6059,5 @@ void __init rtnetlink_init(void) rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, 0); + rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0); } diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 6ad3ee02e023..d8ceee9e0d6f 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -76,6 +76,7 @@ static const struct nlmsg_perm nlmsg_route_perms[] = { RTM_GETNSID, NETLINK_ROUTE_SOCKET__NLMSG_READ }, { RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, { RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_SETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ }, { RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, -- cgit v1.2.3-71-gd317 From 5fd0b838efac16046509f7fb100455d0463b9687 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 2 Mar 2022 18:31:23 +0200 Subject: net: rtnetlink: Add UAPI toggle for IFLA_OFFLOAD_XSTATS_L3_STATS The offloaded HW stats are designed to allow per-netdevice enablement and disablement. Add an attribute, IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, which should be carried by the RTM_SETSTATS message, and expresses a desire to toggle L3 offload xstats on or off. As part of the above, add an exported function rtnl_offload_xstats_notify() that drivers can use when they have installed or deinstalled the counters backing the HW stats. At this point, it is possible to enable, disable and query L3 offload xstats on netdevices. (However there is no driver actually implementing these.) Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/rtnetlink.h | 3 ++ include/uapi/linux/if_link.h | 1 + include/uapi/linux/rtnetlink.h | 2 ++ net/core/rtnetlink.c | 75 ++++++++++++++++++++++++++++++++---------- 4 files changed, 64 insertions(+), 17 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index bb9cb84114c1..7f970b16da3a 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -134,4 +134,7 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, int (*vlan_fill)(struct sk_buff *skb, struct net_device *dev, u32 filter_mask)); + +extern void rtnl_offload_xstats_notify(struct net_device *dev); + #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b1031f481d2f..ddca20357e7e 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1227,6 +1227,7 @@ enum { IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with * a filter mask for the corresponding group. 
*/ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ __IFLA_STATS_GETSET_MAX, }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 14462dc159fd..51530aade46e 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -767,6 +767,8 @@ enum rtnetlink_groups { #define RTNLGRP_MCTP_IFADDR RTNLGRP_MCTP_IFADDR RTNLGRP_TUNNEL, #define RTNLGRP_TUNNEL RTNLGRP_TUNNEL + RTNLGRP_STATS, +#define RTNLGRP_STATS RTNLGRP_STATS __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d09354514355..a66b6761b88b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5566,6 +5566,7 @@ rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = { static const struct nla_policy ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = { + [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1), }; static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters, @@ -5773,16 +5774,51 @@ out: return skb->len; } +void rtnl_offload_xstats_notify(struct net_device *dev) +{ + struct rtnl_stats_dump_filters response_filters = {}; + struct net *net = dev_net(dev); + int idxattr = 0, prividx = 0; + struct sk_buff *skb; + int err = -ENOBUFS; + + ASSERT_RTNL(); + + response_filters.mask[0] |= + IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); + response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= + IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); + + skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), + GFP_KERNEL); + if (!skb) + goto errout; + + err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0, + &response_filters, &idxattr, &prividx, NULL); + if (err < 0) { + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL); + return; + +errout: + rtnl_set_sk_err(net, RTNLGRP_STATS, err); +} +EXPORT_SYMBOL(rtnl_offload_xstats_notify); + static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3; struct rtnl_stats_dump_filters response_filters = {}; struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1]; struct net *net = sock_net(skb->sk); struct net_device *dev = NULL; - int idxattr = 0, prividx = 0; struct if_stats_msg *ifsm; - struct sk_buff *nskb; + bool notify = false; int err; err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb), @@ -5814,24 +5850,29 @@ static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) return err; - nskb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters), - GFP_KERNEL); - if (!nskb) - return -ENOBUFS; + if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) { + u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]); - err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS, - NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, - 0, &response_filters, &idxattr, &prividx, - extack); - if (err < 0) { - /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */ - WARN_ON(err == -EMSGSIZE); - kfree_skb(nskb); - } else { - err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); + if (req) + err = netdev_offload_xstats_enable(dev, t_l3, extack); + else + err = netdev_offload_xstats_disable(dev, t_l3); + + if (!err) + notify = true; + else if (err != -EALREADY) + return err; + + response_filters.mask[0] |= + IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS); + response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |= + IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO); 
} - return err; + if (notify) + rtnl_offload_xstats_notify(dev); + + return 0; } /* Process one rtnetlink message. */ -- cgit v1.2.3-71-gd317 From 115dcec65f61d53e25e1bed5e380468b30f98b14 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:18 +0200 Subject: vfio: Define device migration protocol v2 Replace the existing region based migration protocol with an ioctl based protocol. The two protocols have the same general semantic behaviors, but the way the data is transported is changed. This is the STOP_COPY portion of the new protocol: it defines the 5 states for basic stop and copy migration and the protocol to move the migration data in/out of the kernel. Compared to the clarification of the v1 protocol Alex proposed: https://lore.kernel.org/r/163909282574.728533.7460416142511440919.stgit@omen This has a few deliberate functional differences: - ERROR arcs allow the device function to remain unchanged. - The protocol is not required to return to the original state on transition failure. Instead, userspace can execute an unwind back to the original state, reset, or do something else without needing kernel support. This simplifies the kernel design and, should userspace choose a policy like always reset, avoids doing useless work in the kernel on error handling paths. - PRE_COPY is made optional; userspace must discover it before using it. This reflects the fact that the majority of drivers we are aware of right now will not implement PRE_COPY. - Segmentation is not part of the data stream protocol; the receiver does not have to reproduce the framing boundaries. The hybrid FSM for the device_state is described as a Mealy machine by documenting each of the arcs the driver is required to implement. Defining the remaining set of old/new device_state transitions as 'combination transitions', which are naturally defined as taking multiple FSM arcs along the shortest path within the FSM's digraph, allows a complete matrix of transitions. A new VFIO_DEVICE_FEATURE of VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE is defined to replace writing to the device_state field in the region. This allows returning a brand new FD whenever the requested transition opens a data transfer session. The VFIO core code implements the new feature and provides a helper function to the driver. Using the helper the driver only has to implement 6 of the FSM arcs and the other combination transitions are elaborated consistently from those arcs. A new VFIO_DEVICE_FEATURE of VFIO_DEVICE_FEATURE_MIGRATION is defined to report the capability for migration and indicate which set of states and arcs are supported by the device. The FSM provides a lot of flexibility to make backwards compatible extensions but the VFIO_DEVICE_FEATURE also allows for future breaking extensions for scenarios that cannot support even the basic STOP_COPY requirements. The VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE with the GET option (i.e. VFIO_DEVICE_FEATURE_GET) can be used to read the current migration state of the VFIO device. Data transfer sessions are now carried over a file descriptor, instead of the region. The FD remains valid for the lifetime of the data transfer session. read() and write() transfer the data with normal Linux stream FD semantics. This design allows future expansion to support poll(), io_uring, and other performance optimizations.
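To make the session flow concrete, below is a minimal, hypothetical userspace sketch of a STOP_COPY save built against the uapi in this patch; the buffer sizes and the assumption that the device starts in RUNNING are illustrative only.

#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Hypothetical sketch: request STOP_COPY, then drain the opaque
 * migration stream from the returned data_fd until end of stream. */
static int save_device(int device_fd, int out_fd)
{
	/* The feature payload follows the vfio_device_feature header. */
	__u64 ioctl_buf[(sizeof(struct vfio_device_feature) +
			 sizeof(struct vfio_device_feature_mig_state)) /
			sizeof(__u64)] = {};
	struct vfio_device_feature *feature = (void *)ioctl_buf;
	struct vfio_device_feature_mig_state *mig = (void *)feature->data;
	char buf[4096];
	ssize_t n;

	feature->argsz = sizeof(ioctl_buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;

	/* Entering STOP_COPY opens a data transfer session; the SET
	 * returns the session FD in data_fd. */
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;

	/* read() == 0 means the entire device state has been streamed. */
	while ((n = read(mig->data_fd, buf, sizeof(buf))) > 0)
		if (write(out_fd, buf, n) != n)
			break;

	close(mig->data_fd);
	return n == 0 ? 0 : -1;
}

On the destination side, the same SET with VFIO_DEVICE_STATE_RESUMING returns a data_fd that accepts write() of this stream; the byte order must be preserved, but the write() segmentation need not match the source.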
The complicated mmap mode for data transfer is discarded as current qemu doesn't take meaningful advantage of it, and the new qemu implementation avoids substantially all the performance penalty of using a read() on the region. Link: https://lore.kernel.org/all/20220224142024.147653-10-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Tested-by: Shameer Kolothum Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Reviewed-by: Cornelia Huck Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/vfio/vfio.c | 199 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/vfio.h | 20 +++++ include/uapi/linux/vfio.h | 174 +++++++++++++++++++++++++++++++++++++--- 3 files changed, 380 insertions(+), 13 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 71763e2ac561..b37ab27b511f 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1557,6 +1557,197 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) return 0; } +/* + * vfio_mig_get_next_state - Compute the next step in the FSM + * @cur_fsm - The current state the device is in + * @new_fsm - The target state to reach + * @next_fsm - Pointer to the next step to get to new_fsm + * + * Return 0 upon success, otherwise -errno + * Upon success the next step in the state progression between cur_fsm and + * new_fsm will be set in next_fsm. + * + * This breaks down requests for combination transitions into smaller steps and + * returns the next step to get to new_fsm. The function may need to be called + * multiple times before reaching new_fsm. + * + */ +int vfio_mig_get_next_state(struct vfio_device *device, + enum vfio_device_mig_state cur_fsm, + enum vfio_device_mig_state new_fsm, + enum vfio_device_mig_state *next_fsm) +{ + enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RESUMING + 1 }; + /* + * The coding in this table requires the driver to implement 6 + * FSM arcs: + * RESUMING -> STOP + * RUNNING -> STOP + * STOP -> RESUMING + * STOP -> RUNNING + * STOP -> STOP_COPY + * STOP_COPY -> STOP + * + * The coding will step through multiple states for these combination + * transitions: + * RESUMING -> STOP -> RUNNING + * RESUMING -> STOP -> STOP_COPY + * RUNNING -> STOP -> RESUMING + * RUNNING -> STOP -> STOP_COPY + * STOP_COPY -> STOP -> RESUMING + * STOP_COPY -> STOP -> RUNNING + */ + static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { + [VFIO_DEVICE_STATE_STOP] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RUNNING] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_STOP_COPY] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RESUMING] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + 
[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_ERROR] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + }; + + if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table))) + return -EINVAL; + + if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table)) + return -EINVAL; + + *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm]; + return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(vfio_mig_get_next_state); + +/* + * Convert the drivers's struct file into a FD number and return it to userspace + */ +static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg, + struct vfio_device_feature_mig_state *mig) +{ + int ret; + int fd; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ret = fd; + goto out_fput; + } + + mig->data_fd = fd; + if (copy_to_user(arg, mig, sizeof(*mig))) { + ret = -EFAULT; + goto out_put_unused; + } + fd_install(fd, filp); + return 0; + +out_put_unused: + put_unused_fd(fd); +out_fput: + fput(filp); + return ret; +} + +static int +vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + size_t minsz = + offsetofend(struct vfio_device_feature_mig_state, data_fd); + struct vfio_device_feature_mig_state mig; + struct file *filp = NULL; + int ret; + + if (!device->ops->migration_set_state || + !device->ops->migration_get_state) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, + VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_GET, + sizeof(mig)); + if (ret != 1) + return ret; + + if (copy_from_user(&mig, arg, minsz)) + return -EFAULT; + + if (flags & VFIO_DEVICE_FEATURE_GET) { + enum vfio_device_mig_state curr_state; + + ret = device->ops->migration_get_state(device, &curr_state); + if (ret) + return ret; + mig.device_state = curr_state; + goto out_copy; + } + + /* Handle the VFIO_DEVICE_FEATURE_SET */ + filp = device->ops->migration_set_state(device, mig.device_state); + if (IS_ERR(filp) || !filp) + goto out_copy; + + return vfio_ioct_mig_return_fd(filp, arg, &mig); +out_copy: + mig.data_fd = -1; + if (copy_to_user(arg, &mig, sizeof(mig))) + return -EFAULT; + if (IS_ERR(filp)) + return PTR_ERR(filp); + return 0; +} + +static int vfio_ioctl_device_feature_migration(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + struct vfio_device_feature_migration mig = { + .flags = VFIO_MIGRATION_STOP_COPY, + }; + int ret; + + if (!device->ops->migration_set_state || + !device->ops->migration_get_state) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET, + sizeof(mig)); + if (ret != 1) + return ret; + if (copy_to_user(arg, &mig, sizeof(mig))) + return -EFAULT; + return 0; +} + static int vfio_ioctl_device_feature(struct vfio_device *device, struct vfio_device_feature __user *arg) { @@ -1582,6 +1773,14 @@ static int vfio_ioctl_device_feature(struct vfio_device *device, return -EINVAL; switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { + case VFIO_DEVICE_FEATURE_MIGRATION: + return vfio_ioctl_device_feature_migration( + device, feature.flags, arg->data, + feature.argsz - minsz); 
+ case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE: + return vfio_ioctl_device_feature_mig_device_state( + device, feature.flags, arg->data, + feature.argsz - minsz); default: if (unlikely(!device->ops->device_feature)) return -EINVAL; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 550c28f2ef60..c44e80bbbd3b 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -56,6 +56,16 @@ struct vfio_device { * match, -errno for abort (ex. match with insufficient or incorrect * additional args) * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl + * @migration_set_state: Optional callback to change the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. + * The returned FD is used for data transfer according to the FSM + * definition. The driver is responsible to ensure that FD reaches end + * of stream or error whenever the migration FSM leaves a data transfer + * state or before close_device() returns. + * @migration_get_state: Optional callback to get the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. */ struct vfio_device_ops { char *name; @@ -72,6 +82,11 @@ struct vfio_device_ops { int (*match)(struct vfio_device *vdev, char *buf); int (*device_feature)(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz); + struct file *(*migration_set_state)( + struct vfio_device *device, + enum vfio_device_mig_state new_state); + int (*migration_get_state)(struct vfio_device *device, + enum vfio_device_mig_state *curr_state); }; /** @@ -114,6 +129,11 @@ extern void vfio_device_put(struct vfio_device *device); int vfio_assign_device_set(struct vfio_device *device, void *set_id); +int vfio_mig_get_next_state(struct vfio_device *device, + enum vfio_device_mig_state cur_fsm, + enum vfio_device_mig_state new_fsm, + enum vfio_device_mig_state *next_fsm); + /* * External user API */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index ef33ea002b0b..22ed358c04c5 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -605,25 +605,25 @@ struct vfio_region_gfx_edid { struct vfio_device_migration_info { __u32 device_state; /* VFIO device state */ -#define VFIO_DEVICE_STATE_STOP (0) -#define VFIO_DEVICE_STATE_RUNNING (1 << 0) -#define VFIO_DEVICE_STATE_SAVING (1 << 1) -#define VFIO_DEVICE_STATE_RESUMING (1 << 2) -#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \ - VFIO_DEVICE_STATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING) +#define VFIO_DEVICE_STATE_V1_STOP (0) +#define VFIO_DEVICE_STATE_V1_RUNNING (1 << 0) +#define VFIO_DEVICE_STATE_V1_SAVING (1 << 1) +#define VFIO_DEVICE_STATE_V1_RESUMING (1 << 2) +#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_V1_RUNNING | \ + VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING) #define VFIO_DEVICE_STATE_VALID(state) \ - (state & VFIO_DEVICE_STATE_RESUMING ? \ - (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1) + (state & VFIO_DEVICE_STATE_V1_RESUMING ? 
\ + (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1) #define VFIO_DEVICE_STATE_IS_ERROR(state) \ - ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING)) + ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING)) #define VFIO_DEVICE_STATE_SET_ERROR(state) \ - ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_SATE_SAVING | \ - VFIO_DEVICE_STATE_RESUMING) + ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \ + VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; __u64 pending_bytes; @@ -1002,6 +1002,154 @@ struct vfio_device_feature { */ #define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0) +/* + * Indicates the device can support the migration API through + * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and + * ERROR states are always supported. Support for additional states is + * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be + * set. + * + * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and + * RESUMING are supported. + */ +struct vfio_device_feature_migration { + __aligned_u64 flags; +#define VFIO_MIGRATION_STOP_COPY (1 << 0) +}; +#define VFIO_DEVICE_FEATURE_MIGRATION 1 + +/* + * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO + * device. The new state is supplied in device_state, see enum + * vfio_device_mig_state for details + * + * The kernel migration driver must fully transition the device to the new state + * value before the operation returns to the user. + * + * The kernel migration driver must not generate asynchronous device state + * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET + * ioctl as described above. + * + * If this function fails then current device_state may be the original + * operating state or some other state along the combination transition path. + * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt + * to return to the original state, or attempt to return to some other state + * such as RUNNING or STOP. + * + * If the new_state starts a new data transfer session then the FD associated + * with that session is returned in data_fd. The user is responsible to close + * this FD when it is finished. The user must consider the migration data stream + * carried over the FD to be opaque and must preserve the byte order of the + * stream. The user is not required to preserve buffer segmentation when writing + * the data stream during the RESUMING operation. + * + * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO + * device, data_fd will be -1. + */ +struct vfio_device_feature_mig_state { + __u32 device_state; /* From enum vfio_device_mig_state */ + __s32 data_fd; +}; +#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2 + +/* + * The device migration Finite State Machine is described by the enum + * vfio_device_mig_state. Some of the FSM arcs will create a migration data + * transfer session by returning a FD, in this case the migration data will + * flow over the FD using read() and write() as discussed below. 
+ * + * There are 5 states to support VFIO_MIGRATION_STOP_COPY: + * RUNNING - The device is running normally + * STOP - The device does not change the internal or external state + * STOP_COPY - The device internal state can be read out + * RESUMING - The device is stopped and is loading a new internal state + * ERROR - The device has failed and must be reset + * + * The FSM takes actions on the arcs between FSM states. The driver implements + * the following behavior for the FSM arcs: + * + * RUNNING -> STOP + * STOP_COPY -> STOP + * While in STOP the device must stop the operation of the device. The device + * must not generate interrupts, DMA, or any other change to external state. + * It must not change its internal state. When stopped the device and kernel + * migration driver must accept and respond to interaction to support external + * subsystems in the STOP state, for example PCI MSI-X and PCI config space. + * Failure by the user to restrict device access while in STOP must not result + * in error conditions outside the user context (ex. host system faults). + * + * The STOP_COPY arc will terminate a data transfer session. + * + * RESUMING -> STOP + * Leaving RESUMING terminates a data transfer session and indicates the + * device should complete processing of the data delivered by write(). The + * kernel migration driver should complete the incorporation of data written + * to the data transfer FD into the device internal state and perform + * final validity and consistency checking of the new device state. If the + * user provided data is found to be incomplete, inconsistent, or otherwise + * invalid, the migration driver must fail the SET_STATE ioctl and + * optionally go to the ERROR state as described below. + * + * While in STOP the device has the same behavior as other STOP states + * described above. + * + * To abort a RESUMING session the device must be reset. + * + * STOP -> RUNNING + * While in RUNNING the device is fully operational, the device may generate + * interrupts, DMA, respond to MMIO, all vfio device regions are functional, + * and the device may advance its internal state. + * + * STOP -> STOP_COPY + * This arc begin the process of saving the device state and will return a + * new data_fd. + * + * While in the STOP_COPY state the device has the same behavior as STOP + * with the addition that the data transfers session continues to stream the + * migration state. End of stream on the FD indicates the entire device + * state has been transferred. + * + * The user should take steps to restrict access to vfio device regions while + * the device is in STOP_COPY or risk corruption of the device migration data + * stream. + * + * STOP -> RESUMING + * Entering the RESUMING state starts a process of restoring the device state + * and will return a new data_fd. The data stream fed into the data_fd should + * be taken from the data transfer output of a single FD during saving from + * a compatible device. The migration driver may alter/reset the internal + * device state for this arc if required to prepare the device to receive the + * migration data. + * + * any -> ERROR + * ERROR cannot be specified as a device state, however any transition request + * can be failed with an errno return and may then move the device_state into + * ERROR. In this case the device was unable to execute the requested arc and + * was also unable to restore the device to any valid device_state. 
+ * To recover from ERROR VFIO_DEVICE_RESET must be used to return the + * device_state back to RUNNING. + * + * The remaining possible transitions are interpreted as combinations of the + * above FSM arcs. As there are multiple paths through the FSM arcs the path + * should be selected based on the following rules: + * - Select the shortest path. + * Refer to vfio_mig_get_next_state() for the result of the algorithm. + * + * The automatic transit through the FSM arcs that make up the combination + * transition is invisible to the user. When working with combination arcs the + * user may see any step along the path in the device_state if SET_STATE + * fails. When handling these types of errors users should anticipate future + * revisions of this protocol using new states and those states becoming + * visible in this case. + */ +enum vfio_device_mig_state { + VFIO_DEVICE_STATE_ERROR = 0, + VFIO_DEVICE_STATE_STOP = 1, + VFIO_DEVICE_STATE_RUNNING = 2, + VFIO_DEVICE_STATE_STOP_COPY = 3, + VFIO_DEVICE_STATE_RESUMING = 4, +}; + /* -------- API for Type1 VFIO IOMMU -------- */ /** -- cgit v1.2.3-71-gd317 From 8cb3d83b959be0631cd719b995c40c3cda21cd47 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:19 +0200 Subject: vfio: Extend the device migration protocol with RUNNING_P2P The RUNNING_P2P state is designed to support multiple devices in the same VM that are doing P2P transactions between themselves. When in RUNNING_P2P the device must be able to accept incoming P2P transactions but should not generate outgoing P2P transactions. As an optional extension to the mandatory states it is defined as in between STOP and RUNNING: STOP -> RUNNING_P2P -> RUNNING -> RUNNING_P2P -> STOP For drivers that are unable to support RUNNING_P2P the core code silently merges RUNNING_P2P and RUNNING together. Unless driver support is present, the new state cannot be used in SET_STATE. Drivers that support this will be required to implement 4 FSM arcs beyond the basic FSM. 2 of the basic FSM arcs become combination transitions. Compared to the v1 clarification, NDMA is redefined into FSM states and is described in terms of the desired P2P quiescent behavior, noting that halting all DMA is an acceptable implementation. 
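To illustrate why the intermediate state matters in a multi-device VM, here is a hypothetical two-phase stop sequence; set_state() is an assumed wrapper around the VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE SET ioctl (it is not provided by this patch), and every device is assumed to have advertised VFIO_MIGRATION_P2P.

#include <linux/vfio.h>

/* Assumed wrapper around the SET_STATE ioctl sketched earlier. */
extern int set_state(int device_fd, enum vfio_device_mig_state state);

/* Hypothetical sketch: quiesce P2P DMA on every device before fully
 * stopping any of them, so no stopped device receives a peer's
 * transactions after its migration data collection has begun. */
static int stop_device_set(int *device_fds, int ndevs)
{
	int i;

	/* Phase 1: after this loop, no device issues new P2P DMA. */
	for (i = 0; i < ndevs; i++)
		if (set_state(device_fds[i], VFIO_DEVICE_STATE_RUNNING_P2P))
			return -1;

	/* Phase 2: it is now safe to fully stop each device. */
	for (i = 0; i < ndevs; i++)
		if (set_state(device_fds[i], VFIO_DEVICE_STATE_STOP))
			return -1;

	return 0;
}

Userspace that does not care about P2P can keep requesting STOP directly: for P2P-capable drivers the core elaborates RUNNING -> RUNNING_P2P -> STOP as a combination transition, and for drivers without P2P support RUNNING_P2P is silently merged with RUNNING.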
Link: https://lore.kernel.org/all/20220224142024.147653-11-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Tested-by: Shameer Kolothum Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- drivers/vfio/vfio.c | 87 +++++++++++++++++++++++++++++++++++++---------- include/linux/vfio.h | 1 + include/uapi/linux/vfio.h | 36 ++++++++++++++++++-- 3 files changed, 104 insertions(+), 20 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index b37ab27b511f..a4555014bd1e 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -1577,39 +1577,56 @@ int vfio_mig_get_next_state(struct vfio_device *device, enum vfio_device_mig_state new_fsm, enum vfio_device_mig_state *next_fsm) { - enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RESUMING + 1 }; + enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_RUNNING_P2P + 1 }; /* - * The coding in this table requires the driver to implement 6 - * FSM arcs: + * The coding in this table requires the driver to implement the + * following FSM arcs: * RESUMING -> STOP - * RUNNING -> STOP * STOP -> RESUMING - * STOP -> RUNNING * STOP -> STOP_COPY * STOP_COPY -> STOP * - * The coding will step through multiple states for these combination - * transitions: - * RESUMING -> STOP -> RUNNING + * If P2P is supported then the driver must also implement these FSM + * arcs: + * RUNNING -> RUNNING_P2P + * RUNNING_P2P -> RUNNING + * RUNNING_P2P -> STOP + * STOP -> RUNNING_P2P + * Without P2P the driver must implement: + * RUNNING -> STOP + * STOP -> RUNNING + * + * The coding will step through multiple states for some combination + * transitions; if all optional features are supported, this means the + * following ones: + * RESUMING -> STOP -> RUNNING_P2P + * RESUMING -> STOP -> RUNNING_P2P -> RUNNING * RESUMING -> STOP -> STOP_COPY - * RUNNING -> STOP -> RESUMING - * RUNNING -> STOP -> STOP_COPY + * RUNNING -> RUNNING_P2P -> STOP + * RUNNING -> RUNNING_P2P -> STOP -> RESUMING + * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY + * RUNNING_P2P -> STOP -> RESUMING + * RUNNING_P2P -> STOP -> STOP_COPY + * STOP -> RUNNING_P2P -> RUNNING * STOP_COPY -> STOP -> RESUMING - * STOP_COPY -> STOP -> RUNNING + * STOP_COPY -> STOP -> RUNNING_P2P + * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING */ static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { [VFIO_DEVICE_STATE_STOP] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, - [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RUNNING] = { - [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, - [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, - [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_STOP_COPY] = { @@ -1617,6 +1634,7 @@ int vfio_mig_get_next_state(struct vfio_device 
*device, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RESUMING] = { @@ -1624,6 +1642,15 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, + }, + [VFIO_DEVICE_STATE_RUNNING_P2P] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_ERROR] = { @@ -1631,17 +1658,41 @@ int vfio_mig_get_next_state(struct vfio_device *device, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, + [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, }; - if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table))) + static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = { + [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY, + [VFIO_DEVICE_STATE_RUNNING_P2P] = + VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P, + [VFIO_DEVICE_STATE_ERROR] = ~0U, + }; + + if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || + (state_flags_table[cur_fsm] & device->migration_flags) != + state_flags_table[cur_fsm])) return -EINVAL; - if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table)) + if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || + (state_flags_table[new_fsm] & device->migration_flags) != + state_flags_table[new_fsm]) return -EINVAL; + /* + * Arcs touching optional and unsupported states are skipped over. The + * driver will instead see an arc from the original state to the next + * logical state, as per the above comment. + */ *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm]; + while ((state_flags_table[*next_fsm] & device->migration_flags) != + state_flags_table[*next_fsm]) + *next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm]; + return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 
0 : -EINVAL; } EXPORT_SYMBOL_GPL(vfio_mig_get_next_state); @@ -1731,7 +1782,7 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device, size_t argsz) { struct vfio_device_feature_migration mig = { - .flags = VFIO_MIGRATION_STOP_COPY, + .flags = device->migration_flags, }; int ret; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index c44e80bbbd3b..66dda06ec42d 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -33,6 +33,7 @@ struct vfio_device { struct vfio_group *group; struct vfio_device_set *dev_set; struct list_head dev_set_list; + unsigned int migration_flags; /* Members below here are private, not for driver use */ refcount_t refcount; diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 22ed358c04c5..26a66f68371d 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -1011,10 +1011,16 @@ struct vfio_device_feature { * * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and * RESUMING are supported. + * + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P + * is supported in addition to the STOP_COPY states. + * + * Other combinations of flags have behavior to be defined in the future. */ struct vfio_device_feature_migration { __aligned_u64 flags; #define VFIO_MIGRATION_STOP_COPY (1 << 0) +#define VFIO_MIGRATION_P2P (1 << 1) }; #define VFIO_DEVICE_FEATURE_MIGRATION 1 @@ -1065,10 +1071,13 @@ struct vfio_device_feature_mig_state { * RESUMING - The device is stopped and is loading a new internal state * ERROR - The device has failed and must be reset * + * And 1 optional state to support VFIO_MIGRATION_P2P: + * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA + * * The FSM takes actions on the arcs between FSM states. The driver implements * the following behavior for the FSM arcs: * - * RUNNING -> STOP + * RUNNING_P2P -> STOP * STOP_COPY -> STOP * While in STOP the device must stop the operation of the device. The device * must not generate interrupts, DMA, or any other change to external state. @@ -1095,11 +1104,16 @@ struct vfio_device_feature_mig_state { * * To abort a RESUMING session the device must be reset. * - * STOP -> RUNNING + * RUNNING_P2P -> RUNNING * While in RUNNING the device is fully operational, the device may generate * interrupts, DMA, respond to MMIO, all vfio device regions are functional, * and the device may advance its internal state. * + * RUNNING -> RUNNING_P2P + * STOP -> RUNNING_P2P + * While in RUNNING_P2P the device is partially running in the P2P quiescent + * state defined below. + * * STOP -> STOP_COPY * This arc begin the process of saving the device state and will return a * new data_fd. @@ -1129,6 +1143,18 @@ struct vfio_device_feature_mig_state { * To recover from ERROR VFIO_DEVICE_RESET must be used to return the * device_state back to RUNNING. * + * The optional peer to peer (P2P) quiescent state is intended to be a quiescent + * state for the device for the purposes of managing multiple devices within a + * user context where peer-to-peer DMA between devices may be active. The + * RUNNING_P2P states must prevent the device from initiating + * any new P2P DMA transactions. If the device can identify P2P transactions + * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration + * driver must complete any such outstanding operations prior to completing the + * FSM arc into a P2P state. For the purpose of specification the states + * behave as though the device was fully running if not supported. 
Like while in + * STOP or STOP_COPY the user must not touch the device, otherwise the state + * can be exited. + * * The remaining possible transitions are interpreted as combinations of the * above FSM arcs. As there are multiple paths through the FSM arcs the path * should be selected based on the following rules: @@ -1141,6 +1167,11 @@ struct vfio_device_feature_mig_state { * fails. When handling these types of errors users should anticipate future * revisions of this protocol using new states and those states becoming * visible in this case. + * + * The optional states cannot be used with SET_STATE if the device does not + * support them. The user can discover if these states are supported by using + * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can + * avoid knowing about these optional states if the kernel driver supports them. */ enum vfio_device_mig_state { VFIO_DEVICE_STATE_ERROR = 0, @@ -1148,6 +1179,7 @@ enum vfio_device_mig_state { VFIO_DEVICE_STATE_RUNNING = 2, VFIO_DEVICE_STATE_STOP_COPY = 3, VFIO_DEVICE_STATE_RESUMING = 4, + VFIO_DEVICE_STATE_RUNNING_P2P = 5, }; /* -------- API for Type1 VFIO IOMMU -------- */ -- cgit v1.2.3-71-gd317 From 0f3f9cd7f752f6e685e378620babc5e34af6fb9f Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 24 Feb 2022 16:20:20 +0200 Subject: vfio: Remove migration protocol v1 documentation v1 was never implemented and is replaced by v2. The old uAPI documentation is removed from the header file. The old uAPI definitions are still kept in the header file to ease transition for userspace copying these headers. They will be fully removed down the road. Link: https://lore.kernel.org/all/20220224142024.147653-12-yishaih@nvidia.com Signed-off-by: Jason Gunthorpe Tested-by: Shameer Kolothum Reviewed-by: Alex Williamson Reviewed-by: Cornelia Huck Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky --- include/uapi/linux/vfio.h | 200 +--------------------------------------------- 1 file changed, 2 insertions(+), 198 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 26a66f68371d..fea86061b44e 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -323,7 +323,7 @@ struct vfio_region_info_cap_type { #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) #define VFIO_REGION_TYPE_GFX (1) #define VFIO_REGION_TYPE_CCW (2) -#define VFIO_REGION_TYPE_MIGRATION (3) +#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3) /* sub-types for VFIO_REGION_TYPE_PCI_* */ @@ -405,203 +405,7 @@ struct vfio_region_gfx_edid { #define VFIO_REGION_SUBTYPE_CCW_CRW (3) /* sub-types for VFIO_REGION_TYPE_MIGRATION */ -#define VFIO_REGION_SUBTYPE_MIGRATION (1) - -/* - * The structure vfio_device_migration_info is placed at the 0th offset of - * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related - * migration information. Field accesses from this structure are only supported - * at their native width and alignment. Otherwise, the result is undefined and - * vendor drivers should return an error. - * - * device_state: (read/write) - * - The user application writes to this field to inform the vendor driver - * about the device state to be transitioned to. - * - The vendor driver should take the necessary actions to change the - * device state. After successful transition to a given state, the - * vendor driver should return success on write(device_state, state) - * system call. 
If the device state transition fails, the vendor driver - * should return an appropriate -errno for the fault condition. - * - On the user application side, if the device state transition fails, - * that is, if write(device_state, state) returns an error, read - * device_state again to determine the current state of the device from - * the vendor driver. - * - The vendor driver should return previous state of the device unless - * the vendor driver has encountered an internal error, in which case - * the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR. - * - The user application must use the device reset ioctl to recover the - * device from VFIO_DEVICE_STATE_ERROR state. If the device is - * indicated to be in a valid device state by reading device_state, the - * user application may attempt to transition the device to any valid - * state reachable from the current state or terminate itself. - * - * device_state consists of 3 bits: - * - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear, - * it indicates the _STOP state. When the device state is changed to - * _STOP, driver should stop the device before write() returns. - * - If bit 1 is set, it indicates the _SAVING state, which means that the - * driver should start gathering device state information that will be - * provided to the VFIO user application to save the device's state. - * - If bit 2 is set, it indicates the _RESUMING state, which means that - * the driver should prepare to resume the device. Data provided through - * the migration region should be used to resume the device. - * Bits 3 - 31 are reserved for future use. To preserve them, the user - * application should perform a read-modify-write operation on this - * field when modifying the specified bits. - * - * +------- _RESUMING - * |+------ _SAVING - * ||+----- _RUNNING - * ||| - * 000b => Device Stopped, not saving or resuming - * 001b => Device running, which is the default state - * 010b => Stop the device & save the device state, stop-and-copy state - * 011b => Device running and save the device state, pre-copy state - * 100b => Device stopped and the device state is resuming - * 101b => Invalid state - * 110b => Error state - * 111b => Invalid state - * - * State transitions: - * - * _RESUMING _RUNNING Pre-copy Stop-and-copy _STOP - * (100b) (001b) (011b) (010b) (000b) - * 0. Running or default state - * | - * - * 1. Normal Shutdown (optional) - * |------------------------------------->| - * - * 2. Save the state or suspend - * |------------------------->|---------->| - * - * 3. Save the state during live migration - * |----------->|------------>|---------->| - * - * 4. Resuming - * |<---------| - * - * 5. Resumed - * |--------->| - * - * 0. Default state of VFIO device is _RUNNING when the user application starts. - * 1. During normal shutdown of the user application, the user application may - * optionally change the VFIO device state from _RUNNING to _STOP. This - * transition is optional. The vendor driver must support this transition but - * must not require it. - * 2. When the user application saves state or suspends the application, the - * device state transitions from _RUNNING to stop-and-copy and then to _STOP. - * On state transition from _RUNNING to stop-and-copy, driver must stop the - * device, save the device state and send it to the application through the - * migration region. The sequence to be followed for such transition is given - * below. - * 3. 
In live migration of user application, the state transitions from _RUNNING - * to pre-copy, to stop-and-copy, and to _STOP. - * On state transition from _RUNNING to pre-copy, the driver should start - * gathering the device state while the application is still running and send - * the device state data to application through the migration region. - * On state transition from pre-copy to stop-and-copy, the driver must stop - * the device, save the device state and send it to the user application - * through the migration region. - * Vendor drivers must support the pre-copy state even for implementations - * where no data is provided to the user before the stop-and-copy state. The - * user must not be required to consume all migration data before the device - * transitions to a new state, including the stop-and-copy state. - * The sequence to be followed for above two transitions is given below. - * 4. To start the resuming phase, the device state should be transitioned from - * the _RUNNING to the _RESUMING state. - * In the _RESUMING state, the driver should use the device state data - * received through the migration region to resume the device. - * 5. After providing saved device data to the driver, the application should - * change the state from _RESUMING to _RUNNING. - * - * reserved: - * Reads on this field return zero and writes are ignored. - * - * pending_bytes: (read only) - * The number of pending bytes still to be migrated from the vendor driver. - * - * data_offset: (read only) - * The user application should read data_offset field from the migration - * region. The user application should read the device data from this - * offset within the migration region during the _SAVING state or write - * the device data during the _RESUMING state. See below for details of - * sequence to be followed. - * - * data_size: (read/write) - * The user application should read data_size to get the size in bytes of - * the data copied in the migration region during the _SAVING state and - * write the size in bytes of the data copied in the migration region - * during the _RESUMING state. - * - * The format of the migration region is as follows: - * ------------------------------------------------------------------ - * |vfio_device_migration_info| data section | - * | | /////////////////////////////// | - * ------------------------------------------------------------------ - * ^ ^ - * offset 0-trapped part data_offset - * - * The structure vfio_device_migration_info is always followed by the data - * section in the region, so data_offset will always be nonzero. The offset - * from where the data is copied is decided by the kernel driver. The data - * section can be trapped, mmapped, or partitioned, depending on how the kernel - * driver defines the data section. The data section partition can be defined - * as mapped by the sparse mmap capability. If mmapped, data_offset must be - * page aligned, whereas initial section which contains the - * vfio_device_migration_info structure, might not end at the offset, which is - * page aligned. The user is not required to access through mmap regardless - * of the capabilities of the region mmap. - * The vendor driver should determine whether and how to partition the data - * section. The vendor driver should return data_offset accordingly. - * - * The sequence to be followed while in pre-copy state and stop-and-copy state - * is as follows: - * a. Read pending_bytes, indicating the start of a new iteration to get device - * data. 
Repeated read on pending_bytes at this stage should have no side - * effects. - * If pending_bytes == 0, the user application should not iterate to get data - * for that device. - * If pending_bytes > 0, perform the following steps. - * b. Read data_offset, indicating that the vendor driver should make data - * available through the data section. The vendor driver should return this - * read operation only after data is available from (region + data_offset) - * to (region + data_offset + data_size). - * c. Read data_size, which is the amount of data in bytes available through - * the migration region. - * Read on data_offset and data_size should return the offset and size of - * the current buffer if the user application reads data_offset and - * data_size more than once here. - * d. Read data_size bytes of data from (region + data_offset) from the - * migration region. - * e. Process the data. - * f. Read pending_bytes, which indicates that the data from the previous - * iteration has been read. If pending_bytes > 0, go to step b. - * - * The user application can transition from the _SAVING|_RUNNING - * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the - * number of pending bytes. The user application should iterate in _SAVING - * (stop-and-copy) until pending_bytes is 0. - * - * The sequence to be followed while _RESUMING device state is as follows: - * While data for this device is available, repeat the following steps: - * a. Read data_offset from where the user application should write data. - * b. Write migration data starting at the migration region + data_offset for - * the length determined by data_size from the migration source. - * c. Write data_size, which indicates to the vendor driver that data is - * written in the migration region. Vendor driver must return this write - * operations on consuming data. Vendor driver should apply the - * user-provided migration region data to the device resume state. - * - * If an error occurs during the above sequences, the vendor driver can return - * an error code for next read() or write() operation, which will terminate the - * loop. The user application should then take the next necessary action, for - * example, failing migration or terminating the user application. - * - * For the user application, data is opaque. The user application should write - * data in the same order as the data is received and the data should be of - * same transaction size at the source. - */ +#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1) struct vfio_device_migration_info { __u32 device_state; /* VFIO device state */ -- cgit v1.2.3-71-gd317 From 8d21ec0e46ed6e39994accff8eb4f2be3d2e76b5 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 2 Mar 2022 11:56:34 -0800 Subject: bpf: Add __sk_buff->delivery_time_type and bpf_skb_set_skb_delivery_time() * __sk_buff->delivery_time_type: This patch adds __sk_buff->delivery_time_type. It tells if the delivery_time is stored in __sk_buff->tstamp or not. It will be most useful for ingress to tell if the __sk_buff->tstamp has the (rcv) timestamp or delivery_time. If delivery_time_type is 0 (BPF_SKB_DELIVERY_TIME_NONE), it has the (rcv) timestamp. Two non-zero types are defined for the delivery_time_type: BPF_SKB_DELIVERY_TIME_MONO and BPF_SKB_DELIVERY_TIME_UNSPEC. UNSPEC can only happen at egress because only a mono delivery_time can be forwarded to ingress now. The clock of an UNSPEC delivery_time can be deduced from skb->sk->sk_clockid, which is also how sch_etf does it.
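The sketch below is a hypothetical tc-bpf program showing how the new field fits together with the bpf_skb_set_delivery_time() helper described later in this message; the egress ifindex (2), the 1 ms offset, and the SEC() name are assumptions, and it presumes helper definitions regenerated from this uapi header.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int stamp_mono_dtime(struct __sk_buff *skb)
{
	/* Reading delivery_time_type marks the prog as delivery-time
	 * aware, per the verifier behavior described below. */
	if (skb->delivery_time_type == BPF_SKB_DELIVERY_TIME_NONE)
		/* tstamp currently holds the (rcv) timestamp; replace it
		 * with a mono delivery time before redirecting, e.g. for
		 * sch_fq on the egress device (assumed ifindex 2). */
		bpf_skb_set_delivery_time(skb, bpf_ktime_get_ns() + 1000000,
					  BPF_SKB_DELIVERY_TIME_MONO);

	return bpf_redirect(2, 0);
}

char _license[] SEC("license") = "GPL";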
* Provide forwarded delivery_time to tc-bpf@ingress: With the help of the new delivery_time_type, the tc-bpf has a way to tell if the __sk_buff->tstamp has the (rcv) timestamp or the delivery_time. During bpf load time, the verifier will learn if the bpf prog has accessed the new __sk_buff->delivery_time_type. If it does, it means the tc-bpf@ingress expects that the skb->tstamp may carry the delivery_time. The kernel will then read the skb->tstamp as-is during bpf insn rewrite without checking the skb->mono_delivery_time. This is done by adding a new prog->delivery_time_access bit. The same goes for writing skb->tstamp. * bpf_skb_set_delivery_time(): The bpf_skb_set_delivery_time() helper is added to allow setting both delivery_time and the delivery_time_type at the same time. If the tc-bpf does not need to change the delivery_time_type, it can directly write to the __sk_buff->tstamp as the existing tc-bpf has already been doing. It will be most useful at ingress to change the __sk_buff->tstamp from the (rcv) timestamp to a mono delivery_time and then bpf_redirect_*(). bpf only has a mono clock helper (bpf_ktime_get_ns), the current known use case is the mono EDT for fq, and only a mono delivery time can be kept during forwarding now, so bpf_skb_set_delivery_time() only supports setting BPF_SKB_DELIVERY_TIME_MONO. It can be extended later when use cases come up and the forwarding path also supports other clock bases. Signed-off-by: Martin KaFai Lau Signed-off-by: David S. Miller --- include/linux/filter.h | 3 +- include/uapi/linux/bpf.h | 41 +++++++++- net/core/filter.c | 169 ++++++++++++++++++++++++++++++++--------- tools/include/uapi/linux/bpf.h | 41 +++++++++- 4 files changed, 216 insertions(+), 38 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 1cb1af917617..9bf26307247f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -572,7 +572,8 @@ struct bpf_prog { has_callchain_buf:1, /* callchain buffer allocated? */ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ - call_get_func_ip:1; /* Do we call get_func_ip() */ + call_get_func_ip:1, /* Do we call get_func_ip() */ + delivery_time_access:1; /* Accessed __sk_buff->delivery_time_type */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index afe3d0d7f5f2..4eebea830613 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5086,6 +5086,37 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. + * + * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * Description + * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also + * change the __sk_buff->delivery_time_type to *dtime_type*. + * + * When setting a delivery time (non zero *dtime*) to + * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* + * is supported. It is the only delivery_time_type that will be + * kept after bpf_redirect_*(). + * + * If there is no need to change the __sk_buff->delivery_time_type, + * the delivery time can be directly written to __sk_buff->tstamp + * instead. 
+ * + * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE + * can be used to clear any delivery time stored in + * __sk_buff->tstamp. + * + * Only IPv4 and IPv6 skb->protocol are supported. + * + * This function is most useful when it needs to set a + * mono delivery time to __sk_buff->tstamp and then + * bpf_redirect_*() to the egress of an iface. For example, + * changing the (rcv) timestamp in __sk_buff->tstamp at + * ingress to a mono delivery time and then bpf_redirect_*() + * to sch_fq@phy-dev. + * Return + * 0 on success. + * **-EINVAL** for invalid input + * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5280,6 +5311,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ + FN(skb_set_delivery_time), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5469,6 +5501,12 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +enum { + BPF_SKB_DELIVERY_TIME_NONE, + BPF_SKB_DELIVERY_TIME_UNSPEC, + BPF_SKB_DELIVERY_TIME_MONO, +}; + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -5509,7 +5547,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u32 :32; /* Padding, future use. */ + __u8 delivery_time_type; + __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; diff --git a/net/core/filter.c b/net/core/filter.c index 5072733743e9..88767f7da150 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7388,6 +7388,43 @@ static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { .arg3_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_skb_set_delivery_time, struct sk_buff *, skb, + u64, dtime, u32, dtime_type) +{ + /* skb_clear_delivery_time() is done for inet protocol */ + if (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)) + return -EOPNOTSUPP; + + switch (dtime_type) { + case BPF_SKB_DELIVERY_TIME_MONO: + if (!dtime) + return -EINVAL; + skb->tstamp = dtime; + skb->mono_delivery_time = 1; + break; + case BPF_SKB_DELIVERY_TIME_NONE: + if (dtime) + return -EINVAL; + skb->tstamp = 0; + skb->mono_delivery_time = 0; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct bpf_func_proto bpf_skb_set_delivery_time_proto = { + .func = bpf_skb_set_delivery_time, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + #endif /* CONFIG_INET */ bool bpf_helper_changes_pkt_data(void *func) @@ -7749,6 +7786,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_tcp_gen_syncookie_proto; case BPF_FUNC_sk_assign: return &bpf_sk_assign_proto; + case BPF_FUNC_skb_set_delivery_time: + return &bpf_skb_set_delivery_time_proto; #endif default: return bpf_sk_base_func_proto(func_id); @@ -8088,7 +8127,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; - case offsetofend(struct __sk_buff, gso_size) ... offsetof(struct __sk_buff, hwtstamp) - 1: + case offsetof(struct __sk_buff, delivery_time_type): + return false; + case offsetofend(struct __sk_buff, delivery_time_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: /* Explicitly prohibit access to padding in __sk_buff. 
*/ return false; default: @@ -8443,6 +8484,15 @@ static bool tc_cls_act_is_valid_access(int off, int size, break; case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; + case offsetof(struct __sk_buff, delivery_time_type): + /* The convert_ctx_access() on reading and writing + * __sk_buff->tstamp depends on whether the bpf prog + * has used __sk_buff->delivery_time_type or not. + * Thus, we need to set prog->delivery_time_access + * earlier during is_valid_access() here. + */ + ((struct bpf_prog *)prog)->delivery_time_access = 1; + return size == sizeof(__u8); } return bpf_skb_is_valid_access(off, size, type, prog, info); @@ -8838,6 +8888,45 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } +static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, + struct bpf_insn *insn) +{ + __u8 value_reg = si->dst_reg; + __u8 skb_reg = si->src_reg; + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* value_reg = BPF_SKB_DELIVERY_TIME_MONO */ + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_MONO); + *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 10 : 5); + + *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, skb_reg, + offsetof(struct sk_buff, tstamp)); + *insn++ = BPF_JMP_IMM(BPF_JNE, tmp_reg, 0, 2); + /* value_reg = BPF_SKB_DELIVERY_TIME_NONE */ + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_NONE); + *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 6 : 1); + +#ifdef CONFIG_NET_CLS_ACT + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* At ingress, value_reg = 0 */ + *insn++ = BPF_MOV32_IMM(value_reg, 0); + *insn++ = BPF_JMP_A(1); +#endif + + /* value_reg = BPF_SKB_DELIVERYT_TIME_UNSPEC */ + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_UNSPEC); + + /* 15 insns with CONFIG_NET_CLS_ACT */ + return insn; +} + static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, struct bpf_insn *insn) { @@ -8859,29 +8948,32 @@ static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si, return insn; } -static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_insn *si, +static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, + const struct bpf_insn *si, struct bpf_insn *insn) { __u8 value_reg = si->dst_reg; __u8 skb_reg = si->src_reg; #ifdef CONFIG_NET_CLS_ACT - __u8 tmp_reg = BPF_REG_AX; - - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); - /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, - * so check the skb->mono_delivery_time. - */ - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* skb->mono_delivery_time is set, read 0 as the (rcv) timestamp. 
*/ - *insn++ = BPF_MOV64_IMM(value_reg, 0); - *insn++ = BPF_JMP_A(1); + if (!prog->delivery_time_access) { + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 5); + /* @ingress, read __sk_buff->tstamp as the (rcv) timestamp, + * so check the skb->mono_delivery_time. + */ + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); + /* skb->mono_delivery_time is set, read 0 as the (rcv) timestamp. */ + *insn++ = BPF_MOV64_IMM(value_reg, 0); + *insn++ = BPF_JMP_A(1); + } #endif *insn++ = BPF_LDX_MEM(BPF_DW, value_reg, skb_reg, @@ -8889,27 +8981,30 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_insn *si, return insn; } -static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_insn *si, +static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, + const struct bpf_insn *si, struct bpf_insn *insn) { __u8 value_reg = si->src_reg; __u8 skb_reg = si->dst_reg; #ifdef CONFIG_NET_CLS_ACT - __u8 tmp_reg = BPF_REG_AX; - - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); - /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. - * Clear the skb->mono_delivery_time. - */ - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - ~SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, - SKB_MONO_DELIVERY_TIME_OFFSET); + if (!prog->delivery_time_access) { + __u8 tmp_reg = BPF_REG_AX; + + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, TC_AT_INGRESS_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); + *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 3); + /* Writing __sk_buff->tstamp at ingress as the (rcv) timestamp. + * Clear the skb->mono_delivery_time. + */ + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, + ~SKB_MONO_DELIVERY_TIME_MASK); + *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, + SKB_MONO_DELIVERY_TIME_OFFSET); + } #endif /* skb->tstamp = tstamp */ @@ -9226,9 +9321,13 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); if (type == BPF_WRITE) - insn = bpf_convert_tstamp_write(si, insn); + insn = bpf_convert_tstamp_write(prog, si, insn); else - insn = bpf_convert_tstamp_read(si, insn); + insn = bpf_convert_tstamp_read(prog, si, insn); + break; + + case offsetof(struct __sk_buff, delivery_time_type): + insn = bpf_convert_dtime_type_read(si, insn); break; case offsetof(struct __sk_buff, gso_segs): diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index afe3d0d7f5f2..4eebea830613 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5086,6 +5086,37 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. 
+ * + * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * Description + * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also + * change the __sk_buff->delivery_time_type to *dtime_type*. + * + * When setting a delivery time (non zero *dtime*) to + * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* + * is supported. It is the only delivery_time_type that will be + * kept after bpf_redirect_*(). + * + * If there is no need to change the __sk_buff->delivery_time_type, + * the delivery time can be directly written to __sk_buff->tstamp + * instead. + * + * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE + * can be used to clear any delivery time stored in + * __sk_buff->tstamp. + * + * Only IPv4 and IPv6 skb->protocol are supported. + * + * This function is most useful when it needs to set a + * mono delivery time to __sk_buff->tstamp and then + * bpf_redirect_*() to the egress of an iface. For example, + * changing the (rcv) timestamp in __sk_buff->tstamp at + * ingress to a mono delivery time and then bpf_redirect_*() + * to sch_fq@phy-dev. + * Return + * 0 on success. + * **-EINVAL** for invalid input + * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5280,6 +5311,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ + FN(skb_set_delivery_time), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5469,6 +5501,12 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +enum { + BPF_SKB_DELIVERY_TIME_NONE, + BPF_SKB_DELIVERY_TIME_UNSPEC, + BPF_SKB_DELIVERY_TIME_MONO, +}; + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -5509,7 +5547,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u32 :32; /* Padding, future use. */ + __u8 delivery_time_type; + __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; -- cgit v1.2.3-71-gd317 From 51ef2be546e2e480e56fdb59fdeb9a4406e8d52e Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 17 Feb 2022 16:44:07 +0100 Subject: media: i2c: isl7998x: Add driver for Intersil ISL7998x Add driver for the Intersil ISL7998x Analog to MIPI CSI-2/BT656 decoder. This chip supports 1/2/4 analog video inputs and converts them into 1/2/4 VCs in MIPI CSI2 stream. This driver currently supports ISL79987 and both 720x480 and 720x576 resolutions, however as per specification, all inputs must use the same resolution and standard. The only supported pixel format is now YUYV/YUV422. The chip should support RGB565 on the CSI2 as well, but this is currently unsupported. 
Signed-off-by: Marek Vasut Cc: Sakari Ailus Cc: Mauro Carvalho Chehab Cc: Rob Herring To: linux-media@vger.kernel.org Signed-off-by: Michael Tretter Acked-by: Hans Verkuil [Sakari Ailus: Always call pm_runtime_get_and_resume in pre_streamon] Signed-off-by: Sakari Ailus --- MAINTAINERS | 8 + drivers/media/i2c/Kconfig | 10 + drivers/media/i2c/Makefile | 1 + drivers/media/i2c/isl7998x.c | 1628 ++++++++++++++++++++++++++++++++++++ include/uapi/linux/v4l2-controls.h | 6 + 5 files changed, 1653 insertions(+) create mode 100644 drivers/media/i2c/isl7998x.c (limited to 'include/uapi/linux') diff --git a/MAINTAINERS b/MAINTAINERS index b3949b10dbbf..50015ff7ff54 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10006,6 +10006,14 @@ L: linux-iio@vger.kernel.org F: Documentation/devicetree/bindings/counter/interrupt-counter.yaml F: drivers/counter/interrupt-cnt.c +INTERSIL ISL7998X VIDEO DECODER DRIVER +M: Michael Tretter +R: Pengutronix Kernel Team +L: linux-media@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/media/i2c/isil,isl79987.yaml +F: drivers/media/i2c/isl7998x.c + INVENSENSE ICM-426xx IMU DRIVER M: Jean-Baptiste Maneyrol L: linux-iio@vger.kernel.org diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 508145b572cf..e7194c1be4d2 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -325,6 +325,16 @@ config VIDEO_BT866 To compile this driver as a module, choose M here: the module will be called bt866. +config VIDEO_ISL7998X + tristate "Intersil ISL7998x video decoder" + depends on VIDEO_V4L2 && I2C + depends on OF_GPIO + select MEDIA_CONTROLLER + select VIDEO_V4L2_SUBDEV_API + help + Support for Intersil ISL7998x analog to MIPI-CSI2 or + BT.656 decoder. + config VIDEO_KS0127 tristate "KS0127 video decoder" depends on VIDEO_V4L2 && I2C diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index d85d7a7e9c0f..7f8c1df60330 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -134,6 +134,7 @@ obj-$(CONFIG_VIDEO_IMX334) += imx334.o obj-$(CONFIG_VIDEO_IMX335) += imx335.o obj-$(CONFIG_VIDEO_IMX355) += imx355.o obj-$(CONFIG_VIDEO_IMX412) += imx412.o +obj-$(CONFIG_VIDEO_ISL7998X) += isl7998x.o obj-$(CONFIG_VIDEO_MAX9286) += max9286.o obj-$(CONFIG_VIDEO_MAX9271_LIB) += max9271.o obj-$(CONFIG_VIDEO_RDACM20) += rdacm20.o diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c new file mode 100644 index 000000000000..dc3068549dfa --- /dev/null +++ b/drivers/media/i2c/isl7998x.c @@ -0,0 +1,1628 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intersil ISL7998x analog to MIPI CSI-2 or BT.656 decoder driver. + * + * Copyright (C) 2018-2019 Marek Vasut + * Copyright (C) 2021 Michael Tretter + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * This control allows to activate and deactivate the test pattern on + * selected output channels. + * This value is ISL7998x specific. + */ +#define V4L2_CID_TEST_PATTERN_CHANNELS (V4L2_CID_USER_ISL7998X_BASE + 0) + +/* + * This control allows to specify the color of the test pattern. + * This value is ISL7998x specific. + */ +#define V4L2_CID_TEST_PATTERN_COLOR (V4L2_CID_USER_ISL7998X_BASE + 1) + +/* + * This control allows to specify the bar pattern in the test pattern. + * This value is ISL7998x specific. 
+ */ +#define V4L2_CID_TEST_PATTERN_BARS (V4L2_CID_USER_ISL7998X_BASE + 2) + +#define ISL7998X_INPUTS 4 + +#define ISL7998X_REG(page, reg) (((page) << 8) | (reg)) + +#define ISL7998X_REG_PN_SIZE 256 +#define ISL7998X_REG_PN_BASE(n) ((n) * ISL7998X_REG_PN_SIZE) + +#define ISL7998X_REG_PX_DEC_PAGE(page) ISL7998X_REG((page), 0xff) +#define ISL7998X_REG_PX_DEC_PAGE_MASK 0xf +#define ISL7998X_REG_P0_PRODUCT_ID_CODE ISL7998X_REG(0, 0x00) +#define ISL7998X_REG_P0_PRODUCT_REV_CODE ISL7998X_REG(0, 0x01) +#define ISL7998X_REG_P0_SW_RESET_CTL ISL7998X_REG(0, 0x02) +#define ISL7998X_REG_P0_IO_BUFFER_CTL ISL7998X_REG(0, 0x03) +#define ISL7998X_REG_P0_IO_BUFFER_CTL_1_1 ISL7998X_REG(0, 0x04) +#define ISL7998X_REG_P0_IO_PAD_PULL_EN_CTL ISL7998X_REG(0, 0x05) +#define ISL7998X_REG_P0_IO_BUFFER_CTL_1_2 ISL7998X_REG(0, 0x06) +#define ISL7998X_REG_P0_VIDEO_IN_CHAN_CTL ISL7998X_REG(0, 0x07) +#define ISL7998X_REG_P0_CLK_CTL_1 ISL7998X_REG(0, 0x08) +#define ISL7998X_REG_P0_CLK_CTL_2 ISL7998X_REG(0, 0x09) +#define ISL7998X_REG_P0_CLK_CTL_3 ISL7998X_REG(0, 0x0a) +#define ISL7998X_REG_P0_CLK_CTL_4 ISL7998X_REG(0, 0x0b) +#define ISL7998X_REG_P0_MPP1_SYNC_CTL ISL7998X_REG(0, 0x0c) +#define ISL7998X_REG_P0_MPP2_SYNC_CTL ISL7998X_REG(0, 0x0d) +#define ISL7998X_REG_P0_IRQ_SYNC_CTL ISL7998X_REG(0, 0x0e) +#define ISL7998X_REG_P0_INTERRUPT_STATUS ISL7998X_REG(0, 0x10) +#define ISL7998X_REG_P0_CHAN_1_IRQ ISL7998X_REG(0, 0x11) +#define ISL7998X_REG_P0_CHAN_2_IRQ ISL7998X_REG(0, 0x12) +#define ISL7998X_REG_P0_CHAN_3_IRQ ISL7998X_REG(0, 0x13) +#define ISL7998X_REG_P0_CHAN_4_IRQ ISL7998X_REG(0, 0x14) +#define ISL7998X_REG_P0_SHORT_DIAG_IRQ ISL7998X_REG(0, 0x15) +#define ISL7998X_REG_P0_CHAN_1_IRQ_EN ISL7998X_REG(0, 0x16) +#define ISL7998X_REG_P0_CHAN_2_IRQ_EN ISL7998X_REG(0, 0x17) +#define ISL7998X_REG_P0_CHAN_3_IRQ_EN ISL7998X_REG(0, 0x18) +#define ISL7998X_REG_P0_CHAN_4_IRQ_EN ISL7998X_REG(0, 0x19) +#define ISL7998X_REG_P0_SHORT_DIAG_IRQ_EN ISL7998X_REG(0, 0x1a) +#define ISL7998X_REG_P0_CHAN_1_STATUS ISL7998X_REG(0, 0x1b) +#define ISL7998X_REG_P0_CHAN_2_STATUS ISL7998X_REG(0, 0x1c) +#define ISL7998X_REG_P0_CHAN_3_STATUS ISL7998X_REG(0, 0x1d) +#define ISL7998X_REG_P0_CHAN_4_STATUS ISL7998X_REG(0, 0x1e) +#define ISL7998X_REG_P0_SHORT_DIAG_STATUS ISL7998X_REG(0, 0x1f) +#define ISL7998X_REG_P0_CLOCK_DELAY ISL7998X_REG(0, 0x20) + +#define ISL7998X_REG_PX_DEC_INPUT_FMT(pg) ISL7998X_REG((pg), 0x02) +#define ISL7998X_REG_PX_DEC_STATUS_1(pg) ISL7998X_REG((pg), 0x03) +#define ISL7998X_REG_PX_DEC_STATUS_1_VDLOSS BIT(7) +#define ISL7998X_REG_PX_DEC_STATUS_1_HLOCK BIT(6) +#define ISL7998X_REG_PX_DEC_STATUS_1_VLOCK BIT(3) +#define ISL7998X_REG_PX_DEC_HS_DELAY_CTL(pg) ISL7998X_REG((pg), 0x04) +#define ISL7998X_REG_PX_DEC_ANCTL(pg) ISL7998X_REG((pg), 0x06) +#define ISL7998X_REG_PX_DEC_CROP_HI(pg) ISL7998X_REG((pg), 0x07) +#define ISL7998X_REG_PX_DEC_VDELAY_LO(pg) ISL7998X_REG((pg), 0x08) +#define ISL7998X_REG_PX_DEC_VACTIVE_LO(pg) ISL7998X_REG((pg), 0x09) +#define ISL7998X_REG_PX_DEC_HDELAY_LO(pg) ISL7998X_REG((pg), 0x0a) +#define ISL7998X_REG_PX_DEC_HACTIVE_LO(pg) ISL7998X_REG((pg), 0x0b) +#define ISL7998X_REG_PX_DEC_CNTRL1(pg) ISL7998X_REG((pg), 0x0c) +#define ISL7998X_REG_PX_DEC_CSC_CTL(pg) ISL7998X_REG((pg), 0x0d) +#define ISL7998X_REG_PX_DEC_BRIGHT(pg) ISL7998X_REG((pg), 0x10) +#define ISL7998X_REG_PX_DEC_CONTRAST(pg) ISL7998X_REG((pg), 0x11) +#define ISL7998X_REG_PX_DEC_SHARPNESS(pg) ISL7998X_REG((pg), 0x12) +#define ISL7998X_REG_PX_DEC_SAT_U(pg) ISL7998X_REG((pg), 0x13) +#define ISL7998X_REG_PX_DEC_SAT_V(pg) ISL7998X_REG((pg), 0x14) 
+#define ISL7998X_REG_PX_DEC_HUE(pg) ISL7998X_REG((pg), 0x15) +#define ISL7998X_REG_PX_DEC_VERT_PEAK(pg) ISL7998X_REG((pg), 0x17) +#define ISL7998X_REG_PX_DEC_CORING(pg) ISL7998X_REG((pg), 0x18) +#define ISL7998X_REG_PX_DEC_SDT(pg) ISL7998X_REG((pg), 0x1c) +#define ISL7998X_REG_PX_DEC_SDT_DET BIT(7) +#define ISL7998X_REG_PX_DEC_SDT_NOW GENMASK(6, 4) +#define ISL7998X_REG_PX_DEC_SDT_STANDARD GENMASK(2, 0) +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_M 0 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL 1 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_SECAM 2 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_443 3 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_M 4 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_CN 5 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_60 6 +#define ISL7998X_REG_PX_DEC_SDT_STANDARD_UNKNOWN 7 +#define ISL7998X_REG_PX_DEC_SDTR(pg) ISL7998X_REG((pg), 0x1d) +#define ISL7998X_REG_PX_DEC_SDTR_ATSTART BIT(7) +#define ISL7998X_REG_PX_DEC_CLMPG(pg) ISL7998X_REG((pg), 0x20) +#define ISL7998X_REG_PX_DEC_IAGC(pg) ISL7998X_REG((pg), 0x21) +#define ISL7998X_REG_PX_DEC_AGCGAIN(pg) ISL7998X_REG((pg), 0x22) +#define ISL7998X_REG_PX_DEC_PEAKWT(pg) ISL7998X_REG((pg), 0x23) +#define ISL7998X_REG_PX_DEC_CLMPL(pg) ISL7998X_REG((pg), 0x24) +#define ISL7998X_REG_PX_DEC_SYNCT(pg) ISL7998X_REG((pg), 0x25) +#define ISL7998X_REG_PX_DEC_MISSCNT(pg) ISL7998X_REG((pg), 0x26) +#define ISL7998X_REG_PX_DEC_PCLAMP(pg) ISL7998X_REG((pg), 0x27) +#define ISL7998X_REG_PX_DEC_VERT_CTL_1(pg) ISL7998X_REG((pg), 0x28) +#define ISL7998X_REG_PX_DEC_VERT_CTL_2(pg) ISL7998X_REG((pg), 0x29) +#define ISL7998X_REG_PX_DEC_CLR_KILL_LVL(pg) ISL7998X_REG((pg), 0x2a) +#define ISL7998X_REG_PX_DEC_COMB_FILTER_CTL(pg) ISL7998X_REG((pg), 0x2b) +#define ISL7998X_REG_PX_DEC_LUMA_DELAY(pg) ISL7998X_REG((pg), 0x2c) +#define ISL7998X_REG_PX_DEC_MISC1(pg) ISL7998X_REG((pg), 0x2d) +#define ISL7998X_REG_PX_DEC_MISC2(pg) ISL7998X_REG((pg), 0x2e) +#define ISL7998X_REG_PX_DEC_MISC3(pg) ISL7998X_REG((pg), 0x2f) +#define ISL7998X_REG_PX_DEC_MVSN(pg) ISL7998X_REG((pg), 0x30) +#define ISL7998X_REG_PX_DEC_CSTATUS2(pg) ISL7998X_REG((pg), 0x31) +#define ISL7998X_REG_PX_DEC_HFREF(pg) ISL7998X_REG((pg), 0x32) +#define ISL7998X_REG_PX_DEC_CLMD(pg) ISL7998X_REG((pg), 0x33) +#define ISL7998X_REG_PX_DEC_ID_DET_CTL(pg) ISL7998X_REG((pg), 0x34) +#define ISL7998X_REG_PX_DEC_CLCNTL(pg) ISL7998X_REG((pg), 0x35) +#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_1(pg) ISL7998X_REG((pg), 0x36) +#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_2(pg) ISL7998X_REG((pg), 0x37) +#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_3(pg) ISL7998X_REG((pg), 0x38) +#define ISL7998X_REG_PX_DEC_DIFF_CLMP_CTL_4(pg) ISL7998X_REG((pg), 0x39) +#define ISL7998X_REG_PX_DEC_SHORT_DET_CTL(pg) ISL7998X_REG((pg), 0x3a) +#define ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(pg) ISL7998X_REG((pg), 0x3b) +#define ISL7998X_REG_PX_DEC_AFE_TST_MUX_CTL(pg) ISL7998X_REG((pg), 0x3c) +#define ISL7998X_REG_PX_DEC_DATA_CONV(pg) ISL7998X_REG((pg), 0x3d) +#define ISL7998X_REG_PX_DEC_INTERNAL_TEST(pg) ISL7998X_REG((pg), 0x3f) +#define ISL7998X_REG_PX_DEC_H_DELAY_CTL(pg) ISL7998X_REG((pg), 0x43) +#define ISL7998X_REG_PX_DEC_H_DELAY_II_HI(pg) ISL7998X_REG((pg), 0x44) +#define ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(pg) ISL7998X_REG((pg), 0x45) + +#define ISL7998X_REG_PX_ACA_CTL_1(pg) ISL7998X_REG((pg), 0x80) +#define ISL7998X_REG_PX_ACA_GAIN_CTL(pg) ISL7998X_REG((pg), 0x81) +#define ISL7998X_REG_PX_ACA_Y_AVG_HI_LIMIT(pg) ISL7998X_REG((pg), 0x82) +#define ISL7998X_REG_PX_ACA_Y_AVG_LO_LIMIT(pg) ISL7998X_REG((pg), 0x83) +#define 
ISL7998X_REG_PX_ACA_Y_DET_THRESHOLD(pg) ISL7998X_REG((pg), 0x84) +#define ISL7998X_REG_PX_ACA_BLACK_LVL(pg) ISL7998X_REG((pg), 0x85) +#define ISL7998X_REG_PX_ACA_CENTER_LVL(pg) ISL7998X_REG((pg), 0x86) +#define ISL7998X_REG_PX_ACA_WHITE_LVL(pg) ISL7998X_REG((pg), 0x87) +#define ISL7998X_REG_PX_ACA_MEAN_OFF_LIMIT(pg) ISL7998X_REG((pg), 0x88) +#define ISL7998X_REG_PX_ACA_MEAN_OFF_UPGAIN(pg) ISL7998X_REG((pg), 0x89) +#define ISL7998X_REG_PX_ACA_MEAN_OFF_SLOPE(pg) ISL7998X_REG((pg), 0x8a) +#define ISL7998X_REG_PX_ACA_MEAN_OFF_DNGAIN(pg) ISL7998X_REG((pg), 0x8b) +#define ISL7998X_REG_PX_ACA_DELTA_CO_THRES(pg) ISL7998X_REG((pg), 0x8c) +#define ISL7998X_REG_PX_ACA_DELTA_SLOPE(pg) ISL7998X_REG((pg), 0x8d) +#define ISL7998X_REG_PX_ACA_LO_HI_AVG_THRES(pg) ISL7998X_REG((pg), 0x8e) +#define ISL7998X_REG_PX_ACA_LO_MAX_LVL_CTL(pg) ISL7998X_REG((pg), 0x8f) +#define ISL7998X_REG_PX_ACA_HI_MAX_LVL_CTL(pg) ISL7998X_REG((pg), 0x90) +#define ISL7998X_REG_PX_ACA_LO_UPGAIN_CTL(pg) ISL7998X_REG((pg), 0x91) +#define ISL7998X_REG_PX_ACA_LO_DNGAIN_CTL(pg) ISL7998X_REG((pg), 0x92) +#define ISL7998X_REG_PX_ACA_HI_UPGAIN_CTL(pg) ISL7998X_REG((pg), 0x93) +#define ISL7998X_REG_PX_ACA_HI_DNGAIN_CTL(pg) ISL7998X_REG((pg), 0x94) +#define ISL7998X_REG_PX_ACA_LOPASS_FLT_COEF(pg) ISL7998X_REG((pg), 0x95) +#define ISL7998X_REG_PX_ACA_PDF_INDEX(pg) ISL7998X_REG((pg), 0x96) +#define ISL7998X_REG_PX_ACA_HIST_WIN_H_STT(pg) ISL7998X_REG((pg), 0x97) +#define ISL7998X_REG_PX_ACA_HIST_WIN_H_SZ1(pg) ISL7998X_REG((pg), 0x98) +#define ISL7998X_REG_PX_ACA_HIST_WIN_H_SZ2(pg) ISL7998X_REG((pg), 0x99) +#define ISL7998X_REG_PX_ACA_HIST_WIN_V_STT(pg) ISL7998X_REG((pg), 0x9a) +#define ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ1(pg) ISL7998X_REG((pg), 0x9b) +#define ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ2(pg) ISL7998X_REG((pg), 0x9c) +#define ISL7998X_REG_PX_ACA_Y_AVG(pg) ISL7998X_REG((pg), 0xa0) +#define ISL7998X_REG_PX_ACA_Y_AVG_LIM(pg) ISL7998X_REG((pg), 0xa1) +#define ISL7998X_REG_PX_ACA_LO_AVG(pg) ISL7998X_REG((pg), 0xa2) +#define ISL7998X_REG_PX_ACA_HI_AVG(pg) ISL7998X_REG((pg), 0xa3) +#define ISL7998X_REG_PX_ACA_Y_MAX(pg) ISL7998X_REG((pg), 0xa4) +#define ISL7998X_REG_PX_ACA_Y_MIN(pg) ISL7998X_REG((pg), 0xa5) +#define ISL7998X_REG_PX_ACA_MOFFSET(pg) ISL7998X_REG((pg), 0xa6) +#define ISL7998X_REG_PX_ACA_LO_GAIN(pg) ISL7998X_REG((pg), 0xa7) +#define ISL7998X_REG_PX_ACA_HI_GAIN(pg) ISL7998X_REG((pg), 0xa8) +#define ISL7998X_REG_PX_ACA_LL_SLOPE(pg) ISL7998X_REG((pg), 0xa9) +#define ISL7998X_REG_PX_ACA_LH_SLOPE(pg) ISL7998X_REG((pg), 0xaa) +#define ISL7998X_REG_PX_ACA_HL_SLOPE(pg) ISL7998X_REG((pg), 0xab) +#define ISL7998X_REG_PX_ACA_HH_SLOPE(pg) ISL7998X_REG((pg), 0xac) +#define ISL7998X_REG_PX_ACA_X_LOW(pg) ISL7998X_REG((pg), 0xad) +#define ISL7998X_REG_PX_ACA_X_MEAN(pg) ISL7998X_REG((pg), 0xae) +#define ISL7998X_REG_PX_ACA_X_HIGH(pg) ISL7998X_REG((pg), 0xaf) +#define ISL7998X_REG_PX_ACA_Y_LOW(pg) ISL7998X_REG((pg), 0xb0) +#define ISL7998X_REG_PX_ACA_Y_MEAN(pg) ISL7998X_REG((pg), 0xb1) +#define ISL7998X_REG_PX_ACA_Y_HIGH(pg) ISL7998X_REG((pg), 0xb2) +#define ISL7998X_REG_PX_ACA_CTL_2(pg) ISL7998X_REG((pg), 0xb3) +#define ISL7998X_REG_PX_ACA_CTL_3(pg) ISL7998X_REG((pg), 0xb4) +#define ISL7998X_REG_PX_ACA_CTL_4(pg) ISL7998X_REG((pg), 0xb5) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_HIST(pg) ISL7998X_REG((pg), 0xc0) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TL_H(pg) ISL7998X_REG((pg), 0xc1) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TL_L(pg) ISL7998X_REG((pg), 0xc2) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TL_H(pg) ISL7998X_REG((pg), 0xc3) +#define 
ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TL_L(pg) ISL7998X_REG((pg), 0xc4) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TR_H(pg) ISL7998X_REG((pg), 0xc5) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TR_L(pg) ISL7998X_REG((pg), 0xc6) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TR_H(pg) ISL7998X_REG((pg), 0xc7) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TR_L(pg) ISL7998X_REG((pg), 0xc8) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BL_H(pg) ISL7998X_REG((pg), 0xc9) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BL_L(pg) ISL7998X_REG((pg), 0xca) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BL_H(pg) ISL7998X_REG((pg), 0xcb) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BL_L(pg) ISL7998X_REG((pg), 0xcc) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BR_H(pg) ISL7998X_REG((pg), 0xcd) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BR_L(pg) ISL7998X_REG((pg), 0xce) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BR_H(pg) ISL7998X_REG((pg), 0xcf) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BR_L(pg) ISL7998X_REG((pg), 0xd0) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_LM_H(pg) ISL7998X_REG((pg), 0xd1) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_LM_L(pg) ISL7998X_REG((pg), 0xd2) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_LM_H(pg) ISL7998X_REG((pg), 0xd3) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_LM_L(pg) ISL7998X_REG((pg), 0xd4) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TM_H(pg) ISL7998X_REG((pg), 0xd5) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_TM_L(pg) ISL7998X_REG((pg), 0xd6) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TM_H(pg) ISL7998X_REG((pg), 0xd7) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_TM_L(pg) ISL7998X_REG((pg), 0xd8) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BM_H(pg) ISL7998X_REG((pg), 0xd9) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_BM_L(pg) ISL7998X_REG((pg), 0xda) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BM_H(pg) ISL7998X_REG((pg), 0xdb) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_BM_L(pg) ISL7998X_REG((pg), 0xdc) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_RM_H(pg) ISL7998X_REG((pg), 0xdd) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_X_RM_L(pg) ISL7998X_REG((pg), 0xde) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_RM_H(pg) ISL7998X_REG((pg), 0xdf) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_RM_L(pg) ISL7998X_REG((pg), 0xe0) +#define ISL7998X_REG_PX_ACA_HIST_DATA_LO(pg) ISL7998X_REG((pg), 0xe1) +#define ISL7998X_REG_PX_ACA_HIST_DATA_MID(pg) ISL7998X_REG((pg), 0xe2) +#define ISL7998X_REG_PX_ACA_HIST_DATA_HI(pg) ISL7998X_REG((pg), 0xe3) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_Y_CLR(pg) ISL7998X_REG((pg), 0xe4) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_CB_CLR(pg) ISL7998X_REG((pg), 0xe5) +#define ISL7998X_REG_PX_ACA_FLEX_WIN_CR_CLR(pg) ISL7998X_REG((pg), 0xe6) +#define ISL7998X_REG_PX_ACA_XFER_HIST_HOST(pg) ISL7998X_REG((pg), 0xe7) + +#define ISL7998X_REG_P5_LI_ENGINE_CTL ISL7998X_REG(5, 0x00) +#define ISL7998X_REG_P5_LI_ENGINE_LINE_CTL ISL7998X_REG(5, 0x01) +#define ISL7998X_REG_P5_LI_ENGINE_PIC_WIDTH ISL7998X_REG(5, 0x02) +#define ISL7998X_REG_P5_LI_ENGINE_SYNC_CTL ISL7998X_REG(5, 0x03) +#define ISL7998X_REG_P5_LI_ENGINE_VC_ASSIGNMENT ISL7998X_REG(5, 0x04) +#define ISL7998X_REG_P5_LI_ENGINE_TYPE_CTL ISL7998X_REG(5, 0x05) +#define ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL ISL7998X_REG(5, 0x06) +#define ISL7998X_REG_P5_MIPI_READ_START_CTL ISL7998X_REG(5, 0x07) +#define ISL7998X_REG_P5_PSEUDO_FRM_FIELD_CTL ISL7998X_REG(5, 0x08) +#define ISL7998X_REG_P5_ONE_FIELD_MODE_CTL ISL7998X_REG(5, 0x09) +#define ISL7998X_REG_P5_MIPI_INT_HW_TST_CTR ISL7998X_REG(5, 0x0a) +#define ISL7998X_REG_P5_TP_GEN_BAR_PATTERN ISL7998X_REG(5, 0x0b) +#define ISL7998X_REG_P5_MIPI_PCNT_PSFRM ISL7998X_REG(5, 0x0c) +#define ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL 
ISL7998X_REG(5, 0x0d) +#define ISL7998X_REG_P5_MIPI_VBLANK_PSFRM ISL7998X_REG(5, 0x0e) +#define ISL7998X_REG_P5_LI_ENGINE_CTL_2 ISL7998X_REG(5, 0x0f) +#define ISL7998X_REG_P5_MIPI_WCNT_1 ISL7998X_REG(5, 0x10) +#define ISL7998X_REG_P5_MIPI_WCNT_2 ISL7998X_REG(5, 0x11) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_1 ISL7998X_REG(5, 0x12) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_2 ISL7998X_REG(5, 0x13) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_3 ISL7998X_REG(5, 0x14) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_4 ISL7998X_REG(5, 0x15) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_5 ISL7998X_REG(5, 0x16) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_6 ISL7998X_REG(5, 0x17) +#define ISL7998X_REG_P5_MIPI_DPHY_PARAMS_1 ISL7998X_REG(5, 0x18) +#define ISL7998X_REG_P5_MIPI_DPHY_SOT_PERIOD ISL7998X_REG(5, 0x19) +#define ISL7998X_REG_P5_MIPI_DPHY_EOT_PERIOD ISL7998X_REG(5, 0x1a) +#define ISL7998X_REG_P5_MIPI_DPHY_PARAMS_2 ISL7998X_REG(5, 0x1b) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_7 ISL7998X_REG(5, 0x1c) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_8 ISL7998X_REG(5, 0x1d) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_9 ISL7998X_REG(5, 0x1e) +#define ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_10 ISL7998X_REG(5, 0x1f) +#define ISL7998X_REG_P5_TP_GEN_MIPI ISL7998X_REG(5, 0x20) +#define ISL7998X_REG_P5_ESC_MODE_TIME_CTL ISL7998X_REG(5, 0x21) +#define ISL7998X_REG_P5_AUTO_TEST_ERR_DET ISL7998X_REG(5, 0x22) +#define ISL7998X_REG_P5_MIPI_TIMING ISL7998X_REG(5, 0x23) +#define ISL7998X_REG_P5_PIC_HEIGHT_HIGH ISL7998X_REG(5, 0x24) +#define ISL7998X_REG_P5_PIC_HEIGHT_LOW ISL7998X_REG(5, 0x25) +#define ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL ISL7998X_REG(5, 0x26) +#define ISL7998X_REG_P5_FIFO_THRSH_CNT_1 ISL7998X_REG(5, 0x28) +#define ISL7998X_REG_P5_FIFO_THRSH_CNT_2 ISL7998X_REG(5, 0x29) +#define ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_1 ISL7998X_REG(5, 0x2a) +#define ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_2 ISL7998X_REG(5, 0x2b) +#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_1 ISL7998X_REG(5, 0x2c) +#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_2 ISL7998X_REG(5, 0x2d) +#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_3 ISL7998X_REG(5, 0x2e) +#define ISL7998X_REG_P5_PSF_FIELD_END_CTL_4 ISL7998X_REG(5, 0x2f) +#define ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_1 ISL7998X_REG(5, 0x30) +#define ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_2 ISL7998X_REG(5, 0x31) +#define ISL7998X_REG_P5_MIPI_ANA_CLK_CTL ISL7998X_REG(5, 0x32) +#define ISL7998X_REG_P5_PLL_ANA_STATUS ISL7998X_REG(5, 0x33) +#define ISL7998X_REG_P5_PLL_ANA_MISC_CTL ISL7998X_REG(5, 0x34) +#define ISL7998X_REG_P5_MIPI_ANA ISL7998X_REG(5, 0x35) +#define ISL7998X_REG_P5_PLL_ANA ISL7998X_REG(5, 0x36) +#define ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1 ISL7998X_REG(5, 0x38) +#define ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_2 ISL7998X_REG(5, 0x39) +#define ISL7998X_REG_P5_H_LINE_CNT_1 ISL7998X_REG(5, 0x3a) +#define ISL7998X_REG_P5_H_LINE_CNT_2 ISL7998X_REG(5, 0x3b) +#define ISL7998X_REG_P5_HIST_LINE_CNT_1 ISL7998X_REG(5, 0x3c) +#define ISL7998X_REG_P5_HIST_LINE_CNT_2 ISL7998X_REG(5, 0x3d) + +static const struct reg_sequence isl7998x_init_seq_1[] = { + { ISL7998X_REG_P0_SHORT_DIAG_IRQ_EN, 0xff }, + { ISL7998X_REG_PX_DEC_SDT(0x1), 0x00 }, + { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x1), 0x03 }, + { ISL7998X_REG_PX_DEC_SDT(0x2), 0x00 }, + { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x2), 0x03 }, + { ISL7998X_REG_PX_DEC_SDT(0x3), 0x00 }, + { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x3), 0x03 }, + { ISL7998X_REG_PX_DEC_SDT(0x4), 0x00 }, + { ISL7998X_REG_PX_DEC_SHORT_DET_CTL_1(0x4), 0x03 }, + { ISL7998X_REG_P5_LI_ENGINE_CTL, 
0x00 }, + { ISL7998X_REG_P0_SW_RESET_CTL, 0x1f, 10 }, + { ISL7998X_REG_P0_IO_BUFFER_CTL, 0x00 }, + { ISL7998X_REG_P0_MPP2_SYNC_CTL, 0xc9 }, + { ISL7998X_REG_P0_IRQ_SYNC_CTL, 0xc9 }, + { ISL7998X_REG_P0_CHAN_1_IRQ, 0x03 }, + { ISL7998X_REG_P0_CHAN_2_IRQ, 0x00 }, + { ISL7998X_REG_P0_CHAN_3_IRQ, 0x00 }, + { ISL7998X_REG_P0_CHAN_4_IRQ, 0x00 }, + { ISL7998X_REG_P5_LI_ENGINE_CTL, 0x02 }, + { ISL7998X_REG_P5_LI_ENGINE_LINE_CTL, 0x85 }, + { ISL7998X_REG_P5_LI_ENGINE_PIC_WIDTH, 0xa0 }, + { ISL7998X_REG_P5_LI_ENGINE_SYNC_CTL, 0x18 }, + { ISL7998X_REG_P5_LI_ENGINE_TYPE_CTL, 0x40 }, + { ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL, 0x40 }, + { ISL7998X_REG_P5_MIPI_WCNT_1, 0x05 }, + { ISL7998X_REG_P5_MIPI_WCNT_2, 0xa0 }, + { ISL7998X_REG_P5_TP_GEN_MIPI, 0x00 }, + { ISL7998X_REG_P5_ESC_MODE_TIME_CTL, 0x0c }, + { ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL, 0x00 }, + { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_1, 0x00 }, + { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_2, 0x19 }, + { ISL7998X_REG_P5_PSF_FIELD_END_CTL_1, 0x18 }, + { ISL7998X_REG_P5_PSF_FIELD_END_CTL_2, 0xf1 }, + { ISL7998X_REG_P5_PSF_FIELD_END_CTL_3, 0x00 }, + { ISL7998X_REG_P5_PSF_FIELD_END_CTL_4, 0xf1 }, + { ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_1, 0x00 }, + { ISL7998X_REG_P5_MIPI_ANA_DATA_CTL_2, 0x00 }, + { ISL7998X_REG_P5_MIPI_ANA_CLK_CTL, 0x00 }, + { ISL7998X_REG_P5_PLL_ANA_STATUS, 0xc0 }, + { ISL7998X_REG_P5_PLL_ANA_MISC_CTL, 0x18 }, + { ISL7998X_REG_P5_PLL_ANA, 0x00 }, + { ISL7998X_REG_P0_SW_RESET_CTL, 0x10, 10 }, + /* Page 0xf means write to all of pages 1,2,3,4 */ + { ISL7998X_REG_PX_DEC_VDELAY_LO(0xf), 0x14 }, + { ISL7998X_REG_PX_DEC_MISC3(0xf), 0xe6 }, + { ISL7998X_REG_PX_DEC_CLMD(0xf), 0x85 }, + { ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(0xf), 0x11 }, + { ISL7998X_REG_PX_ACA_XFER_HIST_HOST(0xf), 0x00 }, + { ISL7998X_REG_P0_CLK_CTL_1, 0x1f }, + { ISL7998X_REG_P0_CLK_CTL_2, 0x43 }, + { ISL7998X_REG_P0_CLK_CTL_3, 0x4f }, +}; + +static const struct reg_sequence isl7998x_init_seq_2[] = { + { ISL7998X_REG_P5_LI_ENGINE_SYNC_CTL, 0x10 }, + { ISL7998X_REG_P5_LI_ENGINE_VC_ASSIGNMENT, 0xe4 }, + { ISL7998X_REG_P5_LI_ENGINE_TYPE_CTL, 0x00 }, + { ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL, 0x60 }, + { ISL7998X_REG_P5_MIPI_READ_START_CTL, 0x2b }, + { ISL7998X_REG_P5_PSEUDO_FRM_FIELD_CTL, 0x02 }, + { ISL7998X_REG_P5_ONE_FIELD_MODE_CTL, 0x00 }, + { ISL7998X_REG_P5_MIPI_INT_HW_TST_CTR, 0x62 }, + { ISL7998X_REG_P5_TP_GEN_BAR_PATTERN, 0x02 }, + { ISL7998X_REG_P5_MIPI_PCNT_PSFRM, 0x36 }, + { ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL, 0x00 }, + { ISL7998X_REG_P5_MIPI_VBLANK_PSFRM, 0x6c }, + { ISL7998X_REG_P5_LI_ENGINE_CTL_2, 0x00 }, + { ISL7998X_REG_P5_MIPI_WCNT_1, 0x05 }, + { ISL7998X_REG_P5_MIPI_WCNT_2, 0xa0 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_1, 0x77 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_2, 0x17 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_3, 0x08 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_4, 0x38 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_5, 0x14 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_6, 0xf6 }, + { ISL7998X_REG_P5_MIPI_DPHY_PARAMS_1, 0x00 }, + { ISL7998X_REG_P5_MIPI_DPHY_SOT_PERIOD, 0x17 }, + { ISL7998X_REG_P5_MIPI_DPHY_EOT_PERIOD, 0x0a }, + { ISL7998X_REG_P5_MIPI_DPHY_PARAMS_2, 0x71 }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_7, 0x7a }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_8, 0x0f }, + { ISL7998X_REG_P5_MIPI_DPHY_TIMING_CTL_9, 0x8c }, + { ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL, 0x08 }, + { ISL7998X_REG_P5_FIFO_THRSH_CNT_1, 0x01 }, + { ISL7998X_REG_P5_FIFO_THRSH_CNT_2, 0x0e }, + { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_1, 0x00 }, + { ISL7998X_REG_P5_TP_GEN_RND_SYNC_CTL_2, 0x00 }, + 
{ ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1, 0x03 }, + { ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_2, 0xc0 }, + { ISL7998X_REG_P5_H_LINE_CNT_1, 0x06 }, + { ISL7998X_REG_P5_H_LINE_CNT_2, 0xb3 }, + { ISL7998X_REG_P5_HIST_LINE_CNT_1, 0x00 }, + { ISL7998X_REG_P5_HIST_LINE_CNT_2, 0xf1 }, + { ISL7998X_REG_P5_LI_ENGINE_FIFO_CTL, 0x00 }, + { ISL7998X_REG_P5_MIPI_ANA, 0x00 }, + /* + * Wait a bit after reset so that the chip can capture a frame + * and update internal line counters. + */ + { ISL7998X_REG_P0_SW_RESET_CTL, 0x00, 50 }, +}; + +enum isl7998x_pads { + ISL7998X_PAD_OUT, + ISL7998X_PAD_VIN1, + ISL7998X_PAD_VIN2, + ISL7998X_PAD_VIN3, + ISL7998X_PAD_VIN4, + ISL7998X_NUM_PADS +}; + +struct isl7998x_datafmt { + u32 code; + enum v4l2_colorspace colorspace; +}; + +static const struct isl7998x_datafmt isl7998x_colour_fmts[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB }, +}; + +/* Menu items for LINK_FREQ V4L2 control */ +static const s64 link_freq_menu_items[] = { + /* 1 channel, 1 lane or 2 channels, 2 lanes */ + 108000000, + /* 2 channels, 1 lane or 4 channels, 2 lanes */ + 216000000, + /* 4 channels, 1 lane */ + 432000000, +}; + +/* Menu items for TEST_PATTERN V4L2 control */ +static const char * const isl7998x_test_pattern_menu[] = { + "Disabled", + "Enabled", +}; + +static const char * const isl7998x_test_pattern_bars[] = { + "bbbbwb", "bbbwwb", "bbwbwb", "bbwwwb", +}; + +static const char * const isl7998x_test_pattern_colors[] = { + "Yellow", "Blue", "Green", "Pink", +}; + +struct isl7998x_mode { + unsigned int width; + unsigned int height; + enum v4l2_field field; +}; + +static const struct isl7998x_mode supported_modes[] = { + { + .width = 720, + .height = 576, + .field = V4L2_FIELD_SEQ_TB, + }, + { + .width = 720, + .height = 480, + .field = V4L2_FIELD_SEQ_BT, + }, +}; + +static const struct isl7998x_video_std { + const v4l2_std_id norm; + unsigned int id; + const struct isl7998x_mode *mode; +} isl7998x_std_res[] = { + { V4L2_STD_NTSC_443, + ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_443, + &supported_modes[1] }, + { V4L2_STD_PAL_M, + ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_M, + &supported_modes[1] }, + { V4L2_STD_PAL_Nc, + ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_CN, + &supported_modes[0] }, + { V4L2_STD_PAL_N, + ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL, + &supported_modes[0] }, + { V4L2_STD_PAL_60, + ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL_60, + &supported_modes[1] }, + { V4L2_STD_NTSC, + ISL7998X_REG_PX_DEC_SDT_STANDARD_NTSC_M, + &supported_modes[1] }, + { V4L2_STD_PAL, + ISL7998X_REG_PX_DEC_SDT_STANDARD_PAL, + &supported_modes[0] }, + { V4L2_STD_SECAM, + ISL7998X_REG_PX_DEC_SDT_STANDARD_SECAM, + &supported_modes[0] }, + { V4L2_STD_UNKNOWN, + ISL7998X_REG_PX_DEC_SDT_STANDARD_UNKNOWN, + &supported_modes[1] }, +}; + +struct isl7998x { + struct v4l2_subdev subdev; + struct regmap *regmap; + struct gpio_desc *pd_gpio; + struct gpio_desc *rstb_gpio; + unsigned int nr_mipi_lanes; + u32 nr_inputs; + + const struct isl7998x_datafmt *fmt; + v4l2_std_id norm; + struct media_pad pads[ISL7998X_NUM_PADS]; + + int enabled; + + /* protect fmt, norm, enabled */ + struct mutex lock; + + struct v4l2_ctrl_handler ctrl_handler; + /* protect ctrl_handler */ + struct mutex ctrl_mutex; + + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + u8 test_pattern; + u8 test_pattern_bars; + u8 test_pattern_chans; + u8 test_pattern_color; +}; + +static struct isl7998x *sd_to_isl7998x(struct v4l2_subdev *sd) +{ + return container_of(sd, struct isl7998x, subdev); +} + +static struct isl7998x *i2c_to_isl7998x(const struct i2c_client *client) 
+{ + return sd_to_isl7998x(i2c_get_clientdata(client)); +} + +static unsigned int isl7998x_norm_to_val(v4l2_std_id norm) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(isl7998x_std_res); i++) + if (isl7998x_std_res[i].norm & norm) + break; + if (i == ARRAY_SIZE(isl7998x_std_res)) + return ISL7998X_REG_PX_DEC_SDT_STANDARD_UNKNOWN; + + return isl7998x_std_res[i].id; +} + +static const struct isl7998x_mode *isl7998x_norm_to_mode(v4l2_std_id norm) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(isl7998x_std_res); i++) + if (isl7998x_std_res[i].norm & norm) + break; + /* Use NTSC default resolution during standard detection */ + if (i == ARRAY_SIZE(isl7998x_std_res)) + return &supported_modes[1]; + + return isl7998x_std_res[i].mode; +} + +static int isl7998x_get_nr_inputs(struct device_node *of_node) +{ + struct device_node *port; + unsigned int inputs = 0; + unsigned int i; + + if (of_graph_get_endpoint_count(of_node) > ISL7998X_NUM_PADS) + return -EINVAL; + + /* + * The driver does not provide means to remap the input ports. It + * always configures input ports to start from VID1. Ensure that the + * device tree is correct. + */ + for (i = ISL7998X_PAD_VIN1; i <= ISL7998X_PAD_VIN4; i++) { + port = of_graph_get_port_by_id(of_node, i); + if (!port) + continue; + + inputs |= BIT(i); + of_node_put(port); + } + + switch (inputs) { + case BIT(ISL7998X_PAD_VIN1): + return 1; + case BIT(ISL7998X_PAD_VIN1) | BIT(ISL7998X_PAD_VIN2): + return 2; + case BIT(ISL7998X_PAD_VIN1) | BIT(ISL7998X_PAD_VIN2) | + BIT(ISL7998X_PAD_VIN3) | BIT(ISL7998X_PAD_VIN4): + return 4; + default: + return -EINVAL; + } +} + +static int isl7998x_wait_power_on(struct isl7998x *isl7998x) +{ + struct device *dev = isl7998x->subdev.dev; + u32 chip_id; + int ret; + int err; + + ret = read_poll_timeout(regmap_read, err, !err, 2000, 20000, false, + isl7998x->regmap, + ISL7998X_REG_P0_PRODUCT_ID_CODE, &chip_id); + if (ret) { + dev_err(dev, "timeout while waiting for ISL7998X\n"); + return ret; + } + + dev_dbg(dev, "Found ISL799%x\n", chip_id); + + return ret; +} + +static int isl7998x_set_standard(struct isl7998x *isl7998x, v4l2_std_id norm) +{ + const struct isl7998x_mode *mode = isl7998x_norm_to_mode(norm); + unsigned int val = isl7998x_norm_to_val(norm); + unsigned int width = mode->width; + unsigned int i; + int ret; + + for (i = 0; i < ISL7998X_INPUTS; i++) { + ret = regmap_write_bits(isl7998x->regmap, + ISL7998X_REG_PX_DEC_SDT(i + 1), + ISL7998X_REG_PX_DEC_SDT_STANDARD, + val); + if (ret) + return ret; + } + + ret = regmap_write(isl7998x->regmap, + ISL7998X_REG_P5_LI_ENGINE_LINE_CTL, + 0x20 | ((width >> 7) & 0x1f)); + if (ret) + return ret; + + ret = regmap_write(isl7998x->regmap, + ISL7998X_REG_P5_LI_ENGINE_PIC_WIDTH, + (width << 1) & 0xff); + if (ret) + return ret; + + return 0; +} + +static int isl7998x_init(struct isl7998x *isl7998x) +{ + const unsigned int lanes = isl7998x->nr_mipi_lanes; + const u32 isl7998x_video_in_chan_map[] = { 0x00, 0x11, 0x02, 0x02 }; + const struct reg_sequence isl7998x_init_seq_custom[] = { + { ISL7998X_REG_P0_VIDEO_IN_CHAN_CTL, + isl7998x_video_in_chan_map[isl7998x->nr_inputs - 1] }, + { ISL7998X_REG_P0_CLK_CTL_4, + (lanes == 1) ? 0x40 : 0x41 }, + { ISL7998X_REG_P5_LI_ENGINE_CTL, + (lanes == 1) ? 
0x01 : 0x02 }, + }; + struct device *dev = isl7998x->subdev.dev; + struct regmap *regmap = isl7998x->regmap; + int ret; + + dev_dbg(dev, "configuring %d lanes for %d inputs (norm %s)\n", + isl7998x->nr_mipi_lanes, isl7998x->nr_inputs, + v4l2_norm_to_name(isl7998x->norm)); + + ret = regmap_register_patch(regmap, isl7998x_init_seq_1, + ARRAY_SIZE(isl7998x_init_seq_1)); + if (ret) + return ret; + + mutex_lock(&isl7998x->lock); + ret = isl7998x_set_standard(isl7998x, isl7998x->norm); + mutex_unlock(&isl7998x->lock); + if (ret) + return ret; + + ret = regmap_register_patch(regmap, isl7998x_init_seq_custom, + ARRAY_SIZE(isl7998x_init_seq_custom)); + if (ret) + return ret; + + return regmap_register_patch(regmap, isl7998x_init_seq_2, + ARRAY_SIZE(isl7998x_init_seq_2)); +} + +static int isl7998x_set_test_pattern(struct isl7998x *isl7998x) +{ + const struct reg_sequence isl7998x_init_seq_tpg_off[] = { + { ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL, 0 }, + { ISL7998X_REG_P5_LI_ENGINE_CTL_2, 0 } + }; + const struct reg_sequence isl7998x_init_seq_tpg_on[] = { + { ISL7998X_REG_P5_TP_GEN_BAR_PATTERN, + isl7998x->test_pattern_bars << 6 }, + { ISL7998X_REG_P5_LI_ENGINE_CTL_2, + isl7998x->norm & V4L2_STD_PAL ? BIT(2) : 0 }, + { ISL7998X_REG_P5_LI_ENGINE_TP_GEN_CTL, + (isl7998x->test_pattern_chans << 4) | + (isl7998x->test_pattern_color << 2) } + }; + struct device *dev = isl7998x->subdev.dev; + struct regmap *regmap = isl7998x->regmap; + int ret; + + if (pm_runtime_get_if_in_use(dev) <= 0) + return 0; + + if (isl7998x->test_pattern != 0) { + dev_dbg(dev, "enabling test pattern: channels 0x%x, %s, %s\n", + isl7998x->test_pattern_chans, + isl7998x_test_pattern_bars[isl7998x->test_pattern_bars], + isl7998x_test_pattern_colors[isl7998x->test_pattern_color]); + ret = regmap_register_patch(regmap, isl7998x_init_seq_tpg_on, + ARRAY_SIZE(isl7998x_init_seq_tpg_on)); + } else { + ret = regmap_register_patch(regmap, isl7998x_init_seq_tpg_off, + ARRAY_SIZE(isl7998x_init_seq_tpg_off)); + } + + pm_runtime_put(dev); + + return ret; +} + +#ifdef CONFIG_VIDEO_ADV_DEBUG +static int isl7998x_g_register(struct v4l2_subdev *sd, + struct v4l2_dbg_register *reg) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + int ret; + u32 val; + + ret = regmap_read(isl7998x->regmap, reg->reg, &val); + if (ret) + return ret; + + reg->size = 1; + reg->val = val; + + return 0; +} + +static int isl7998x_s_register(struct v4l2_subdev *sd, + const struct v4l2_dbg_register *reg) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + + return regmap_write(isl7998x->regmap, reg->reg, reg->val); +} +#endif + +static int isl7998x_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + + mutex_lock(&isl7998x->lock); + *norm = isl7998x->norm; + mutex_unlock(&isl7998x->lock); + + return 0; +} + +static int isl7998x_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct device *dev = &client->dev; + int ret = 0; + + mutex_lock(&isl7998x->lock); + if (isl7998x->enabled) { + ret = -EBUSY; + mutex_unlock(&isl7998x->lock); + return ret; + } + isl7998x->norm = norm; + mutex_unlock(&isl7998x->lock); + + if (pm_runtime_get_if_in_use(dev) <= 0) + return ret; + + ret = isl7998x_set_standard(isl7998x, norm); + + pm_runtime_put(dev); + + return ret; +} + +static int isl7998x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + struct i2c_client *client = 
v4l2_get_subdevdata(sd); + struct device *dev = &client->dev; + unsigned int std_id[ISL7998X_INPUTS]; + unsigned int i; + int ret; + u32 reg; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + dev_dbg(dev, "starting video standard detection\n"); + + mutex_lock(&isl7998x->lock); + if (isl7998x->enabled) { + ret = -EBUSY; + goto out_unlock; + } + + ret = isl7998x_set_standard(isl7998x, V4L2_STD_UNKNOWN); + if (ret) + goto out_unlock; + + for (i = 0; i < ISL7998X_INPUTS; i++) { + ret = regmap_write(isl7998x->regmap, + ISL7998X_REG_PX_DEC_SDTR(i + 1), + ISL7998X_REG_PX_DEC_SDTR_ATSTART); + if (ret) + goto out_reset_std; + } + + for (i = 0; i < ISL7998X_INPUTS; i++) { + ret = regmap_read_poll_timeout(isl7998x->regmap, + ISL7998X_REG_PX_DEC_SDT(i + 1), + reg, + !(reg & ISL7998X_REG_PX_DEC_SDT_DET), + 2000, 500 * USEC_PER_MSEC); + if (ret) + goto out_reset_std; + std_id[i] = FIELD_GET(ISL7998X_REG_PX_DEC_SDT_NOW, reg); + } + + /* + * According to Renesas FAE, all input cameras must have the + * same standard on this chip. + */ + for (i = 0; i < isl7998x->nr_inputs; i++) { + dev_dbg(dev, "input %d: detected %s\n", + i, v4l2_norm_to_name(isl7998x_std_res[std_id[i]].norm)); + if (std_id[0] != std_id[i]) + dev_warn(dev, + "incompatible standards: %s on input %d (expected %s)\n", + v4l2_norm_to_name(isl7998x_std_res[std_id[i]].norm), i, + v4l2_norm_to_name(isl7998x_std_res[std_id[0]].norm)); + } + + *std = isl7998x_std_res[std_id[0]].norm; + +out_reset_std: + isl7998x_set_standard(isl7998x, isl7998x->norm); +out_unlock: + mutex_unlock(&isl7998x->lock); + pm_runtime_put(dev); + + return ret; +} + +static int isl7998x_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *std) +{ + *std = V4L2_STD_ALL; + + return 0; +} + +static int isl7998x_g_input_status(struct v4l2_subdev *sd, u32 *status) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct device *dev = &client->dev; + unsigned int i; + int ret = 0; + u32 reg; + + if (!pm_runtime_active(dev)) { + *status |= V4L2_IN_ST_NO_POWER; + return 0; + } + + for (i = 0; i < isl7998x->nr_inputs; i++) { + ret = regmap_read(isl7998x->regmap, + ISL7998X_REG_PX_DEC_STATUS_1(i + 1), ®); + if (!ret) { + if (reg & ISL7998X_REG_PX_DEC_STATUS_1_VDLOSS) + *status |= V4L2_IN_ST_NO_SIGNAL; + if (!(reg & ISL7998X_REG_PX_DEC_STATUS_1_HLOCK)) + *status |= V4L2_IN_ST_NO_H_LOCK; + if (!(reg & ISL7998X_REG_PX_DEC_STATUS_1_VLOCK)) + *status |= V4L2_IN_ST_NO_V_LOCK; + } + } + + return ret; +} + +static int isl7998x_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct device *dev = &client->dev; + int ret = 0; + u32 reg; + + dev_dbg(dev, "stream %s\n", enable ? 
"ON" : "OFF"); + + mutex_lock(&isl7998x->lock); + if (isl7998x->enabled == enable) + goto out; + isl7998x->enabled = enable; + + if (enable) { + ret = isl7998x_set_test_pattern(isl7998x); + if (ret) + goto out; + } + + regmap_read(isl7998x->regmap, + ISL7998X_REG_P5_LI_ENGINE_CTL, ®); + if (enable) + reg &= ~BIT(7); + else + reg |= BIT(7); + ret = regmap_write(isl7998x->regmap, + ISL7998X_REG_P5_LI_ENGINE_CTL, reg); + +out: + mutex_unlock(&isl7998x->lock); + + return ret; +} + +static int isl7998x_pre_streamon(struct v4l2_subdev *sd, u32 flags) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct device *dev = &client->dev; + + return pm_runtime_resume_and_get(dev); +} + +static int isl7998x_post_streamoff(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct device *dev = &client->dev; + + pm_runtime_put(dev); + + return 0; +} + +static int isl7998x_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->index >= ARRAY_SIZE(isl7998x_colour_fmts)) + return -EINVAL; + + code->code = isl7998x_colour_fmts[code->index].code; + + return 0; +} + +static int isl7998x_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != isl7998x_colour_fmts[0].code) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static int isl7998x_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *format) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + struct v4l2_mbus_framefmt *mf = &format->format; + const struct isl7998x_mode *mode; + + mutex_lock(&isl7998x->lock); + + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { + format->format = *v4l2_subdev_get_try_format(sd, sd_state, + format->pad); + goto out; + } + + mode = isl7998x_norm_to_mode(isl7998x->norm); + + mf->width = mode->width; + mf->height = mode->height; + mf->code = isl7998x->fmt->code; + mf->field = mode->field; + mf->colorspace = 0; + +out: + mutex_unlock(&isl7998x->lock); + + return 0; +} + +static int isl7998x_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *format) +{ + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + struct v4l2_mbus_framefmt *mf = &format->format; + const struct isl7998x_mode *mode; + + mutex_lock(&isl7998x->lock); + + mode = isl7998x_norm_to_mode(isl7998x->norm); + + mf->width = mode->width; + mf->height = mode->height; + mf->code = isl7998x->fmt->code; + mf->field = mode->field; + + if (format->which == V4L2_SUBDEV_FORMAT_TRY) + *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = format->format; + + mutex_unlock(&isl7998x->lock); + + return 0; +} + +static int isl7998x_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct isl7998x *isl7998x = container_of(ctrl->handler, + struct isl7998x, ctrl_handler); + int ret = 0; + + switch (ctrl->id) { + case V4L2_CID_TEST_PATTERN_BARS: + mutex_lock(&isl7998x->lock); + isl7998x->test_pattern_bars = ctrl->val & 0x3; + ret = isl7998x_set_test_pattern(isl7998x); + mutex_unlock(&isl7998x->lock); + break; + case V4L2_CID_TEST_PATTERN_CHANNELS: + mutex_lock(&isl7998x->lock); + isl7998x->test_pattern_chans = ctrl->val & 0xf; + ret = 
isl7998x_set_test_pattern(isl7998x); + mutex_unlock(&isl7998x->lock); + break; + case V4L2_CID_TEST_PATTERN_COLOR: + mutex_lock(&isl7998x->lock); + isl7998x->test_pattern_color = ctrl->val & 0x3; + ret = isl7998x_set_test_pattern(isl7998x); + mutex_unlock(&isl7998x->lock); + break; + case V4L2_CID_TEST_PATTERN: + mutex_lock(&isl7998x->lock); + isl7998x->test_pattern = ctrl->val; + ret = isl7998x_set_test_pattern(isl7998x); + mutex_unlock(&isl7998x->lock); + break; + } + + return ret; +} + +static const struct v4l2_subdev_core_ops isl7998x_subdev_core_ops = { +#ifdef CONFIG_VIDEO_ADV_DEBUG + .g_register = isl7998x_g_register, + .s_register = isl7998x_s_register, +#endif +}; + +static const struct v4l2_subdev_video_ops isl7998x_subdev_video_ops = { + .g_std = isl7998x_g_std, + .s_std = isl7998x_s_std, + .querystd = isl7998x_querystd, + .g_tvnorms = isl7998x_g_tvnorms, + .g_input_status = isl7998x_g_input_status, + .s_stream = isl7998x_s_stream, + .pre_streamon = isl7998x_pre_streamon, + .post_streamoff = isl7998x_post_streamoff, +}; + +static const struct v4l2_subdev_pad_ops isl7998x_subdev_pad_ops = { + .enum_mbus_code = isl7998x_enum_mbus_code, + .enum_frame_size = isl7998x_enum_frame_size, + .get_fmt = isl7998x_get_fmt, + .set_fmt = isl7998x_set_fmt, +}; + +static const struct v4l2_subdev_ops isl7998x_subdev_ops = { + .core = &isl7998x_subdev_core_ops, + .video = &isl7998x_subdev_video_ops, + .pad = &isl7998x_subdev_pad_ops, +}; + +static const struct media_entity_operations isl7998x_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_ctrl_ops isl7998x_ctrl_ops = { + .s_ctrl = isl7998x_set_ctrl, +}; + +static const struct v4l2_ctrl_config isl7998x_ctrls[] = { + { + .ops = &isl7998x_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN_BARS, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Test Pattern Bars", + .max = ARRAY_SIZE(isl7998x_test_pattern_bars) - 1, + .def = 0, + .qmenu = isl7998x_test_pattern_bars, + }, { + .ops = &isl7998x_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN_CHANNELS, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "Test Pattern Channels", + .min = 0, + .max = 0xf, + .step = 1, + .def = 0xf, + .flags = 0, + }, { + .ops = &isl7998x_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN_COLOR, + .type = V4L2_CTRL_TYPE_MENU, + .name = "Test Pattern Color", + .max = ARRAY_SIZE(isl7998x_test_pattern_colors) - 1, + .def = 0, + .qmenu = isl7998x_test_pattern_colors, + }, +}; + +#define ISL7998X_REG_DECODER_ACA_READABLE_RANGE(page) \ + /* Decoder range */ \ + regmap_reg_range(ISL7998X_REG_PX_DEC_INPUT_FMT(page), \ + ISL7998X_REG_PX_DEC_HS_DELAY_CTL(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_ANCTL(page), \ + ISL7998X_REG_PX_DEC_CSC_CTL(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_BRIGHT(page), \ + ISL7998X_REG_PX_DEC_HUE(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_VERT_PEAK(page), \ + ISL7998X_REG_PX_DEC_CORING(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_SDT(page), \ + ISL7998X_REG_PX_DEC_SDTR(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_CLMPG(page), \ + ISL7998X_REG_PX_DEC_DATA_CONV(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_INTERNAL_TEST(page), \ + ISL7998X_REG_PX_DEC_INTERNAL_TEST(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_H_DELAY_CTL(page), \ + ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(page)), \ + /* ACA range */ \ + regmap_reg_range(ISL7998X_REG_PX_ACA_CTL_1(page), \ + ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ2(page)), \ + regmap_reg_range(ISL7998X_REG_PX_ACA_Y_AVG(page), \ + ISL7998X_REG_PX_ACA_CTL_4(page)), \ + 
regmap_reg_range(ISL7998X_REG_PX_ACA_FLEX_WIN_HIST(page), \ + ISL7998X_REG_PX_ACA_XFER_HIST_HOST(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(page), \ + ISL7998X_REG_PX_DEC_PAGE(page)) + +#define ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(page) \ + /* Decoder range */ \ + regmap_reg_range(ISL7998X_REG_PX_DEC_INPUT_FMT(page), \ + ISL7998X_REG_PX_DEC_INPUT_FMT(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_HS_DELAY_CTL(page), \ + ISL7998X_REG_PX_DEC_HS_DELAY_CTL(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_ANCTL(page), \ + ISL7998X_REG_PX_DEC_CSC_CTL(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_BRIGHT(page), \ + ISL7998X_REG_PX_DEC_HUE(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_VERT_PEAK(page), \ + ISL7998X_REG_PX_DEC_CORING(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_SDT(page), \ + ISL7998X_REG_PX_DEC_SDTR(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_CLMPG(page), \ + ISL7998X_REG_PX_DEC_MISC3(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_CLMD(page), \ + ISL7998X_REG_PX_DEC_DATA_CONV(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_INTERNAL_TEST(page), \ + ISL7998X_REG_PX_DEC_INTERNAL_TEST(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_H_DELAY_CTL(page), \ + ISL7998X_REG_PX_DEC_H_DELAY_II_LOW(page)), \ + /* ACA range */ \ + regmap_reg_range(ISL7998X_REG_PX_ACA_CTL_1(page), \ + ISL7998X_REG_PX_ACA_HIST_WIN_V_SZ2(page)), \ + regmap_reg_range(ISL7998X_REG_PX_ACA_CTL_2(page), \ + ISL7998X_REG_PX_ACA_CTL_4(page)), \ + regmap_reg_range(ISL7998X_REG_PX_ACA_FLEX_WIN_HIST(page), \ + ISL7998X_REG_PX_ACA_HIST_DATA_LO(page)), \ + regmap_reg_range(ISL7998X_REG_PX_ACA_XFER_HIST_HOST(page), \ + ISL7998X_REG_PX_ACA_XFER_HIST_HOST(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(page), \ + ISL7998X_REG_PX_DEC_PAGE(page)) + +#define ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(page) \ + /* Decoder range */ \ + regmap_reg_range(ISL7998X_REG_PX_DEC_STATUS_1(page), \ + ISL7998X_REG_PX_DEC_STATUS_1(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_SDT(page), \ + ISL7998X_REG_PX_DEC_SDT(page)), \ + regmap_reg_range(ISL7998X_REG_PX_DEC_MVSN(page), \ + ISL7998X_REG_PX_DEC_HFREF(page)), \ + /* ACA range */ \ + regmap_reg_range(ISL7998X_REG_PX_ACA_Y_AVG(page), \ + ISL7998X_REG_PX_ACA_Y_HIGH(page)), \ + regmap_reg_range(ISL7998X_REG_PX_ACA_HIST_DATA_LO(page), \ + ISL7998X_REG_PX_ACA_FLEX_WIN_CR_CLR(page)) + +static const struct regmap_range isl7998x_readable_ranges[] = { + regmap_reg_range(ISL7998X_REG_P0_PRODUCT_ID_CODE, + ISL7998X_REG_P0_IRQ_SYNC_CTL), + regmap_reg_range(ISL7998X_REG_P0_INTERRUPT_STATUS, + ISL7998X_REG_P0_CLOCK_DELAY), + regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(0), + ISL7998X_REG_PX_DEC_PAGE(0)), + + ISL7998X_REG_DECODER_ACA_READABLE_RANGE(1), + ISL7998X_REG_DECODER_ACA_READABLE_RANGE(2), + ISL7998X_REG_DECODER_ACA_READABLE_RANGE(3), + ISL7998X_REG_DECODER_ACA_READABLE_RANGE(4), + + regmap_reg_range(ISL7998X_REG_P5_LI_ENGINE_CTL, + ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL), + regmap_reg_range(ISL7998X_REG_P5_FIFO_THRSH_CNT_1, + ISL7998X_REG_P5_PLL_ANA), + regmap_reg_range(ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1, + ISL7998X_REG_P5_HIST_LINE_CNT_2), + regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(5), + ISL7998X_REG_PX_DEC_PAGE(5)), +}; + +static const struct regmap_range isl7998x_writeable_ranges[] = { + regmap_reg_range(ISL7998X_REG_P0_SW_RESET_CTL, + ISL7998X_REG_P0_IRQ_SYNC_CTL), + regmap_reg_range(ISL7998X_REG_P0_CHAN_1_IRQ, + ISL7998X_REG_P0_SHORT_DIAG_IRQ_EN), + regmap_reg_range(ISL7998X_REG_P0_CLOCK_DELAY, + ISL7998X_REG_P0_CLOCK_DELAY), + 
regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(0), + ISL7998X_REG_PX_DEC_PAGE(0)), + + ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(1), + ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(2), + ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(3), + ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(4), + + regmap_reg_range(ISL7998X_REG_P5_LI_ENGINE_CTL, + ISL7998X_REG_P5_ESC_MODE_TIME_CTL), + regmap_reg_range(ISL7998X_REG_P5_MIPI_SP_HS_TRL_CTL, + ISL7998X_REG_P5_PLL_ANA), + regmap_reg_range(ISL7998X_REG_P5_TOTAL_PF_LINE_CNT_1, + ISL7998X_REG_P5_HIST_LINE_CNT_2), + regmap_reg_range(ISL7998X_REG_PX_DEC_PAGE(5), + ISL7998X_REG_PX_DEC_PAGE(5)), + + ISL7998X_REG_DECODER_ACA_WRITEABLE_RANGE(0xf), +}; + +static const struct regmap_range isl7998x_volatile_ranges[] = { + /* Product id code register is used to check availability */ + regmap_reg_range(ISL7998X_REG_P0_PRODUCT_ID_CODE, + ISL7998X_REG_P0_PRODUCT_ID_CODE), + regmap_reg_range(ISL7998X_REG_P0_MPP1_SYNC_CTL, + ISL7998X_REG_P0_IRQ_SYNC_CTL), + regmap_reg_range(ISL7998X_REG_P0_INTERRUPT_STATUS, + ISL7998X_REG_P0_INTERRUPT_STATUS), + regmap_reg_range(ISL7998X_REG_P0_CHAN_1_STATUS, + ISL7998X_REG_P0_SHORT_DIAG_STATUS), + + ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(1), + ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(2), + ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(3), + ISL7998X_REG_DECODER_ACA_VOLATILE_RANGE(4), + + regmap_reg_range(ISL7998X_REG_P5_AUTO_TEST_ERR_DET, + ISL7998X_REG_P5_PIC_HEIGHT_LOW), +}; + +static const struct regmap_access_table isl7998x_readable_table = { + .yes_ranges = isl7998x_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(isl7998x_readable_ranges), +}; + +static const struct regmap_access_table isl7998x_writeable_table = { + .yes_ranges = isl7998x_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(isl7998x_writeable_ranges), +}; + +static const struct regmap_access_table isl7998x_volatile_table = { + .yes_ranges = isl7998x_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(isl7998x_volatile_ranges), +}; + +static const struct regmap_range_cfg isl7998x_ranges[] = { + { + .range_min = ISL7998X_REG_PN_BASE(0), + .range_max = ISL7998X_REG_PX_ACA_XFER_HIST_HOST(0xf), + .selector_reg = ISL7998X_REG_PX_DEC_PAGE(0), + .selector_mask = ISL7998X_REG_PX_DEC_PAGE_MASK, + .window_start = 0, + .window_len = 256, + } +}; + +static const struct regmap_config isl7998x_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = ISL7998X_REG_PX_ACA_XFER_HIST_HOST(0xf), + .ranges = isl7998x_ranges, + .num_ranges = ARRAY_SIZE(isl7998x_ranges), + .rd_table = &isl7998x_readable_table, + .wr_table = &isl7998x_writeable_table, + .volatile_table = &isl7998x_volatile_table, + .cache_type = REGCACHE_RBTREE, +}; + +static int isl7998x_mc_init(struct isl7998x *isl7998x) +{ + unsigned int i; + + isl7998x->subdev.entity.ops = &isl7998x_entity_ops; + isl7998x->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + + isl7998x->pads[ISL7998X_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE; + for (i = ISL7998X_PAD_VIN1; i < ISL7998X_NUM_PADS; i++) + isl7998x->pads[i].flags = MEDIA_PAD_FL_SINK; + + return media_entity_pads_init(&isl7998x->subdev.entity, + ISL7998X_NUM_PADS, + isl7998x->pads); +} + +static int get_link_freq_menu_index(unsigned int lanes, + unsigned int inputs) +{ + int ret = -EINVAL; + + switch (lanes) { + case 1: + if (inputs == 1) + ret = 0; + if (inputs == 2) + ret = 1; + if (inputs == 4) + ret = 2; + break; + case 2: + if (inputs == 2) + ret = 0; + if (inputs == 4) + ret = 1; + break; + default: + break; + } + + return ret; +} + +static void isl7998x_remove_controls(struct isl7998x *isl7998x) +{ + 
v4l2_ctrl_handler_free(&isl7998x->ctrl_handler); + mutex_destroy(&isl7998x->ctrl_mutex); +} + +static int isl7998x_init_controls(struct isl7998x *isl7998x) +{ + struct v4l2_subdev *sd = &isl7998x->subdev; + int link_freq_index; + unsigned int i; + int ret; + + ret = v4l2_ctrl_handler_init(&isl7998x->ctrl_handler, + 2 + ARRAY_SIZE(isl7998x_ctrls)); + if (ret) + return ret; + + mutex_init(&isl7998x->ctrl_mutex); + isl7998x->ctrl_handler.lock = &isl7998x->ctrl_mutex; + link_freq_index = get_link_freq_menu_index(isl7998x->nr_mipi_lanes, + isl7998x->nr_inputs); + if (link_freq_index < 0 || + link_freq_index >= ARRAY_SIZE(link_freq_menu_items)) { + dev_err(sd->dev, + "failed to find MIPI link freq: %d lanes, %d inputs\n", + isl7998x->nr_mipi_lanes, isl7998x->nr_inputs); + ret = -EINVAL; + goto err; + } + + isl7998x->link_freq = v4l2_ctrl_new_int_menu(&isl7998x->ctrl_handler, + &isl7998x_ctrl_ops, + V4L2_CID_LINK_FREQ, + ARRAY_SIZE(link_freq_menu_items) - 1, + link_freq_index, + link_freq_menu_items); + if (isl7998x->link_freq) + isl7998x->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + for (i = 0; i < ARRAY_SIZE(isl7998x_ctrls); i++) + v4l2_ctrl_new_custom(&isl7998x->ctrl_handler, + &isl7998x_ctrls[i], NULL); + + v4l2_ctrl_new_std_menu_items(&isl7998x->ctrl_handler, + &isl7998x_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(isl7998x_test_pattern_menu) - 1, + 0, 0, isl7998x_test_pattern_menu); + + ret = isl7998x->ctrl_handler.error; + if (ret) + goto err; + + isl7998x->subdev.ctrl_handler = &isl7998x->ctrl_handler; + v4l2_ctrl_handler_setup(&isl7998x->ctrl_handler); + + return 0; + +err: + isl7998x_remove_controls(isl7998x); + + return ret; +} + +static int isl7998x_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct v4l2_fwnode_endpoint endpoint = { + .bus_type = V4L2_MBUS_CSI2_DPHY, + }; + struct fwnode_handle *ep; + struct isl7998x *isl7998x; + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + int nr_inputs; + int ret; + + ret = i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA); + if (!ret) { + dev_warn(&adapter->dev, + "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); + return -EIO; + } + + isl7998x = devm_kzalloc(dev, sizeof(*isl7998x), GFP_KERNEL); + if (!isl7998x) + return -ENOMEM; + + isl7998x->pd_gpio = devm_gpiod_get_optional(dev, "powerdown", + GPIOD_OUT_HIGH); + if (IS_ERR(isl7998x->pd_gpio)) + return dev_err_probe(dev, PTR_ERR(isl7998x->pd_gpio), + "Failed to retrieve/request PD GPIO\n"); + + isl7998x->rstb_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(isl7998x->rstb_gpio)) + return dev_err_probe(dev, PTR_ERR(isl7998x->rstb_gpio), + "Failed to retrieve/request RSTB GPIO\n"); + + isl7998x->regmap = devm_regmap_init_i2c(client, &isl7998x_regmap); + if (IS_ERR(isl7998x->regmap)) + return dev_err_probe(dev, PTR_ERR(isl7998x->regmap), + "Failed to allocate register map\n"); + + ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), + ISL7998X_PAD_OUT, 0, 0); + if (!ep) + return dev_err_probe(dev, -EINVAL, "Missing endpoint node\n"); + + ret = v4l2_fwnode_endpoint_parse(ep, &endpoint); + fwnode_handle_put(ep); + if (ret) + return dev_err_probe(dev, ret, "Failed to parse endpoint\n"); + + if (endpoint.bus.mipi_csi2.num_data_lanes == 0 || + endpoint.bus.mipi_csi2.num_data_lanes > 2) + return dev_err_probe(dev, -EINVAL, + "Invalid number of MIPI lanes\n"); + + isl7998x->nr_mipi_lanes = endpoint.bus.mipi_csi2.num_data_lanes; + + nr_inputs = isl7998x_get_nr_inputs(dev->of_node); + if (nr_inputs < 0) + 
return dev_err_probe(dev, nr_inputs, + "Invalid number of input ports\n"); + isl7998x->nr_inputs = nr_inputs; + + v4l2_i2c_subdev_init(&isl7998x->subdev, client, &isl7998x_subdev_ops); + isl7998x->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + + ret = isl7998x_mc_init(isl7998x); + if (ret < 0) + return ret; + + isl7998x->fmt = &isl7998x_colour_fmts[0]; + isl7998x->norm = V4L2_STD_NTSC; + isl7998x->enabled = 0; + + mutex_init(&isl7998x->lock); + + ret = isl7998x_init_controls(isl7998x); + if (ret) + goto err_entity_cleanup; + + ret = v4l2_async_register_subdev(&isl7998x->subdev); + if (ret < 0) + goto err_controls_cleanup; + + pm_runtime_enable(dev); + + return 0; + +err_controls_cleanup: + isl7998x_remove_controls(isl7998x); +err_entity_cleanup: + media_entity_cleanup(&isl7998x->subdev.entity); + + return ret; +} + +static int isl7998x_remove(struct i2c_client *client) +{ + struct isl7998x *isl7998x = i2c_to_isl7998x(client); + + pm_runtime_disable(&client->dev); + v4l2_async_unregister_subdev(&isl7998x->subdev); + isl7998x_remove_controls(isl7998x); + media_entity_cleanup(&isl7998x->subdev.entity); + + return 0; +} + +static const struct of_device_id isl7998x_of_match[] = { + { .compatible = "isil,isl79987", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, isl7998x_of_match); + +static const struct i2c_device_id isl7998x_id[] = { + { "isl79987", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(i2c, isl7998x_id); + +static int __maybe_unused isl7998x_runtime_resume(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + int ret; + + gpiod_set_value(isl7998x->rstb_gpio, 1); + gpiod_set_value(isl7998x->pd_gpio, 0); + gpiod_set_value(isl7998x->rstb_gpio, 0); + + ret = isl7998x_wait_power_on(isl7998x); + if (ret) + goto err; + + ret = isl7998x_init(isl7998x); + if (ret) + goto err; + + return 0; + +err: + gpiod_set_value(isl7998x->pd_gpio, 1); + + return ret; +} + +static int __maybe_unused isl7998x_runtime_suspend(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct isl7998x *isl7998x = sd_to_isl7998x(sd); + + gpiod_set_value(isl7998x->pd_gpio, 1); + + return 0; +} + +static const struct dev_pm_ops isl7998x_pm_ops = { + SET_RUNTIME_PM_OPS(isl7998x_runtime_suspend, + isl7998x_runtime_resume, + NULL) +}; + +static struct i2c_driver isl7998x_i2c_driver = { + .driver = { + .name = "isl7998x", + .of_match_table = of_match_ptr(isl7998x_of_match), + .pm = &isl7998x_pm_ops, + }, + .probe_new = isl7998x_probe, + .remove = isl7998x_remove, + .id_table = isl7998x_id, +}; + +module_i2c_driver(isl7998x_i2c_driver); + +MODULE_DESCRIPTION("Intersil ISL7998x Analog to MIPI CSI-2/BT656 decoder"); +MODULE_AUTHOR("Marek Vasut "); +MODULE_LICENSE("GPL v2"); diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index c8e0f84d204d..92576ed03fc4 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -219,6 +219,12 @@ enum v4l2_colorfx { */ #define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170) +/* + * The base for the isl7998x driver controls. + * We reserve 16 controls for this driver. 
+ */
+#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180)
+
 /* MPEG-class control IDs */
 /* The MPEG controls are applicable to all codec controls
 * and the 'MPEG' part of the define is historical */
-- cgit v1.2.3-71-gd317

From bfa26ba343c727e055223be04e08f2ebdd43c293 Mon Sep 17 00:00:00 2001
From: William Mahon
Date: Thu, 3 Mar 2022 18:23:42 -0800
Subject: HID: add mapping for KEY_DICTATE

Numerous keyboards are adding dictate keys, which allow text messages to be dictated via a microphone.

This patch adds a new key definition KEY_DICTATE and maps the 0x0c/0x0d8 usage code to this new keycode. Additionally, hid-debug is adjusted to recognize this new usage code as well.

Signed-off-by: William Mahon
Acked-by: Benjamin Tissoires
Link: https://lore.kernel.org/r/20220303021501.1.I5dbf50eb1a7a6734ee727bda4a8573358c6d3ec0@changeid
Signed-off-by: Dmitry Torokhov
---
 drivers/hid/hid-debug.c | 1 +
 drivers/hid/hid-input.c | 1 +
 include/uapi/linux/input-event-codes.h | 1 +
 3 files changed, 3 insertions(+)
(limited to 'include/uapi/linux')

diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 26c31d759914..8aa68416b1d7 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -969,6 +969,7 @@ static const char *keys[KEY_MAX + 1] = {
 [KEY_ASSISTANT] = "Assistant",
 [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
 [KEY_EMOJI_PICKER] = "EmojiPicker",
+ [KEY_DICTATE] = "Dictate",
 [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
 [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 112901d2d8d2..ce2b75a67cb8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
 case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
+ case 0x0d8: map_key_clear(KEY_DICTATE); break;
 case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break;
 case 0x0e0: map_abs_clear(ABS_VOLUME); break;
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 225ec87d4f22..4db5d41848e4 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -612,6 +612,7 @@
 #define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
 #define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
 #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
-- cgit v1.2.3-71-gd317

From 327b89f0acc4c20a06ed59e4d9af7f6d804dc2e2 Mon Sep 17 00:00:00 2001
From: William Mahon
Date: Thu, 3 Mar 2022 18:26:22 -0800
Subject: HID: add mapping for KEY_ALL_APPLICATIONS

This patch adds a new key definition for KEY_ALL_APPLICATIONS and aliases KEY_DASHBOARD to it. It also maps the 0x0c/0x2a2 usage code to KEY_ALL_APPLICATIONS.
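As an aside for readers of this log (not part of the patch): user space sees both this key and KEY_DICTATE from the patch above as ordinary EV_KEY events. A minimal sketch of a consumer, assuming headers that already carry the new definitions; the device path is hypothetical:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>        /* pulls in input-event-codes.h */

    int main(void)
    {
            struct input_event ev;
            int fd = open("/dev/input/event0", O_RDONLY);   /* hypothetical node */

            if (fd < 0)
                    return 1;
            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    if (ev.type != EV_KEY || ev.value != 1) /* key-down only */
                            continue;
                    if (ev.code == KEY_DICTATE)
                            printf("dictate pressed\n");
                    else if (ev.code == KEY_ALL_APPLICATIONS)
                            printf("all-applications pressed\n");
            }
            close(fd);
            return 0;
    }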
Signed-off-by: William Mahon
Acked-by: Benjamin Tissoires
Link: https://lore.kernel.org/r/20220303035618.1.I3a7746ad05d270161a18334ae06e3b6db1a1d339@changeid
Signed-off-by: Dmitry Torokhov
---
 drivers/hid/hid-debug.c | 4 +++-
 drivers/hid/hid-input.c | 2 ++
 include/uapi/linux/input-event-codes.h | 3 ++-
 3 files changed, 7 insertions(+), 2 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8aa68416b1d7..81e7e404a5fc 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = {
 [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24",
 [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
- [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
+ [KEY_PROG4] = "Prog4",
+ [KEY_ALL_APPLICATIONS] = "AllApplications",
+ [KEY_SUSPEND] = "Suspend",
 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ce2b75a67cb8..56ec27398a00 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1084,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+ case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break;
+
 case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
 case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
 case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 4db5d41848e4..7989d9483ea7 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -278,7 +278,8 @@
 #define KEY_PAUSECD 201
 #define KEY_PROG3 202
 #define KEY_PROG4 203
-#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
 #define KEY_SUSPEND 205
 #define KEY_CLOSE 206 /* AC Close */
 #define KEY_PLAY 207
-- cgit v1.2.3-71-gd317

From 96ba61ee5331eb6e2f4c2baeb994b9ceb01d8266 Mon Sep 17 00:00:00 2001
From: Dmitry Osipenko
Date: Sun, 20 Feb 2022 21:46:16 +0100
Subject: media: v4l2-ctrls: Add new V4L2_H264_DECODE_PARAM_FLAG_P/BFRAME flags

Add new V4L2_H264_DECODE_PARAM_FLAG_P/BFRAME flags that are needed by the NVIDIA Tegra video decoder. Userspace will have to set these flags in accordance with the type of the decoded frame.

Reviewed-by: Nicolas Dufresne
Signed-off-by: Dmitry Osipenko
Signed-off-by: Hans Verkuil
Signed-off-by: Mauro Carvalho Chehab
---
 Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst | 6 ++++++
 include/uapi/linux/v4l2-controls.h | 2 ++
 2 files changed, 8 insertions(+)
(limited to 'include/uapi/linux')

diff --git a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
index cc080c4257d0..f87584ad90ba 100644
--- a/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
+++ b/Documentation/userspace-api/media/v4l/ext-ctrls-codec-stateless.rst
@@ -616,6 +616,12 @@ Stateless Codec Control ID
 * - ``V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD``
 - 0x00000004
 -
+ * - ``V4L2_H264_DECODE_PARAM_FLAG_PFRAME``
+ - 0x00000008
+ -
+ * - ``V4L2_H264_DECODE_PARAM_FLAG_BFRAME``
+ - 0x00000010
+ -
 ..
raw:: latex

diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index c8e0f84d204d..e3d48d571062 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1563,6 +1563,8 @@ struct v4l2_h264_dpb_entry {
 #define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
 #define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02
 #define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04
+#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08
+#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10
 #define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7)
 /**
-- cgit v1.2.3-71-gd317

From 1092347165cf5ed1453c1f211641a859818a2828 Mon Sep 17 00:00:00 2001
From: Sean Young
Date: Wed, 9 Feb 2022 17:03:12 +0100
Subject: media: lirc: remove unused feature LIRC_CAN_SET_REC_DUTY_CYCLE

There is no hardware which can filter input on the duty cycle, so no driver implements this. On top of that, LIRC_CAN_SET_REC_DUTY_CYCLE has the same value as LIRC_CAN_MEASURE_CARRIER (0x02000000).

Signed-off-by: Sean Young
Signed-off-by: Mauro Carvalho Chehab
---
 Documentation/userspace-api/media/lirc.h.rst.exceptions | 1 -
 include/uapi/linux/lirc.h | 1 -
 2 files changed, 2 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/Documentation/userspace-api/media/lirc.h.rst.exceptions b/Documentation/userspace-api/media/lirc.h.rst.exceptions
index 5f31e967bc50..913d17b49831 100644
--- a/Documentation/userspace-api/media/lirc.h.rst.exceptions
+++ b/Documentation/userspace-api/media/lirc.h.rst.exceptions
@@ -30,7 +30,6 @@ ignore define LIRC_CAN_REC
 ignore define LIRC_CAN_SEND_MASK
 ignore define LIRC_CAN_REC_MASK
-ignore define LIRC_CAN_SET_REC_DUTY_CYCLE
 # Obsolete ioctls

diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index 21c69a6a100d..23b0f2c8ba81 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -73,7 +73,6 @@
 #define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
 #define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16)
 #define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000
 #define LIRC_CAN_GET_REC_RESOLUTION 0x20000000
-- cgit v1.2.3-71-gd317

From 72a74c8f0a0df12c7d7ea012aa70d95152858dea Mon Sep 17 00:00:00 2001
From: Ming Qian
Date: Thu, 24 Feb 2022 11:10:00 +0800
Subject: media: add nv12m_8l128 and nv12m_10be_8l128 video format.

nv12m_8l128 is an 8-bit tiled NV12 format used by the amphion decoder. nv12m_10be_8l128 is a 10-bit tiled format used by the same decoder. The tile size is 8x128.

Signed-off-by: Ming Qian
Signed-off-by: Shijie Qin
Signed-off-by: Zhou Peng
Signed-off-by: Hans Verkuil
---
 .../userspace-api/media/v4l/pixfmt-yuv-planar.rst | 28 +++++++++++++++++++---
 drivers/media/v4l2-core/v4l2-ioctl.c | 2 ++
 include/uapi/linux/videodev2.h | 2 ++
 3 files changed, 29 insertions(+), 3 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
index 9a969f662595..cc3e4b5791c5 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst
@@ -257,6 +257,8 @@ of the luma plane.
 .. _V4L2-PIX-FMT-NV12-4L4:
 .. _V4L2-PIX-FMT-NV12-16L16:
 .. _V4L2-PIX-FMT-NV12-32L32:
+.. _V4L2_PIX_FMT_NV12M_8L128:
+..
_V4L2_PIX_FMT_NV12M_10BE_8L128: Tiled NV12 ---------- @@ -281,21 +283,41 @@ If the vertical resolution is an odd number of tiles, the last row of tiles is stored in linear order. The layouts of the luma and chroma planes are identical. -``V4L2_PIX_FMT_NV12_4L4`` stores pixel in 4x4 tiles, and stores +``V4L2_PIX_FMT_NV12_4L4`` stores pixels in 4x4 tiles, and stores tiles linearly in memory. The line stride and image height must be aligned to a multiple of 4. The layouts of the luma and chroma planes are identical. -``V4L2_PIX_FMT_NV12_16L16`` stores pixel in 16x16 tiles, and stores +``V4L2_PIX_FMT_NV12_16L16`` stores pixels in 16x16 tiles, and stores tiles linearly in memory. The line stride and image height must be aligned to a multiple of 16. The layouts of the luma and chroma planes are identical. -``V4L2_PIX_FMT_NV12_32L32`` stores pixel in 32x32 tiles, and stores +``V4L2_PIX_FMT_NV12_32L32`` stores pixels in 32x32 tiles, and stores tiles linearly in memory. The line stride and image height must be aligned to a multiple of 32. The layouts of the luma and chroma planes are identical. +``V4L2_PIX_FMT_NV12M_8L128`` is similar to ``V4L2_PIX_FMT_NV12M`` but stores +pixels in 2D 8x128 tiles, and stores tiles linearly in memory. +The image height must be aligned to a multiple of 128. +The layouts of the luma and chroma planes are identical. + +``V4L2_PIX_FMT_NV12M_10BE_8L128`` is similar to ``V4L2_PIX_FMT_NV12M`` but stores +10 bits pixels in 2D 8x128 tiles, and stores tiles linearly in memory. +the data is arranged in big endian order. +The image height must be aligned to a multiple of 128. +The layouts of the luma and chroma planes are identical. +Note the tile size is 8bytes multiplied by 128 bytes, +it means that the low bits and high bits of one pixel may be in different tiles. +The 10 bit pixels are packed, so 5 bytes contain 4 10-bit pixels layout like +this (for luma): +byte 0: Y0(bits 9-2) +byte 1: Y0(bits 1-0) Y1(bits 9-4) +byte 2: Y1(bits 3-0) Y2(bits 9-6) +byte 3: Y2(bits 5-0) Y3(bits 9-8) +byte 4: Y3(bits 7-0) + ``V4L2_PIX_FMT_MM21`` store luma pixel in 16x32 tiles, and chroma pixels in 16x16 tiles. The line stride must be aligned to a multiple of 16 and the image height must be aligned to a multiple of 32. 
The number of luma and chroma

diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 642cb90f457c..96e307fe3aab 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1390,6 +1390,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
 case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
 case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
 case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
+ case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
+ case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break;
 default:
 /* Compressed formats */

diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index df8b9c486ba1..3768a0a80830 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -632,6 +632,8 @@ struct v4l2_pix_format {
 /* Tiled YUV formats, non contiguous planes */
 #define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */
 #define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12M_8L128 v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12M_10BE_8L128 v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
 /* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
 #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
-- cgit v1.2.3-71-gd317

From d045b9eb95a9b611c483897a69e7285aefdc66d7 Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Mon, 7 Mar 2022 12:44:36 -0800
Subject: mptcp: introduce implicit endpoints

In some edge scenarios, an MPTCP subflow can use a local address mapped by an "implicit" endpoint created by the in-kernel path manager. Such endpoints' presence can be confusing, as their creation is hard to track, and they prevent later creation of an endpoint with the same address from user-space.

Define a new endpoint flag to mark implicit endpoints and allow user-space to replace implicit endpoints with user-provided data at endpoint creation time.
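For illustration only (not part of the patch; the address, the id and the printed "implicit" flag are hypothetical and assume an iproute2 recent enough to know about MPTCP endpoints), the intended workflow looks roughly like this:

    # an in-kernel PM subflow has left an implicit endpoint behind;
    # it shows up in the endpoint dump:
    ip mptcp endpoint show
    #   10.0.0.2 id 1 implicit

    # user space can take the address over by re-adding it without an
    # explicit id: the kernel replaces the implicit entry in place and
    # keeps its id
    ip mptcp endpoint add 10.0.0.2 subflow

    # re-adding with an explicit id is still rejected, as before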
Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- include/uapi/linux/mptcp.h | 1 + net/mptcp/pm_netlink.c | 61 ++++++++++++++++++------- tools/testing/selftests/net/mptcp/mptcp_join.sh | 4 +- 3 files changed, 47 insertions(+), 19 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index f106a3941cdf..9690efedb5fa 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -81,6 +81,7 @@ enum { #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) #define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) +#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) enum { MPTCP_PM_CMD_UNSPEC, diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 91b77d1162cf..10368a4f1c4a 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -877,10 +877,18 @@ static bool address_use_port(struct mptcp_pm_addr_entry *entry) MPTCP_PM_ADDR_FLAG_SIGNAL; } +/* caller must ensure the RCU grace period is already elapsed */ +static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) +{ + if (entry->lsk) + sock_release(entry->lsk); + kfree(entry); +} + static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, struct mptcp_pm_addr_entry *entry) { - struct mptcp_pm_addr_entry *cur; + struct mptcp_pm_addr_entry *cur, *del_entry = NULL; unsigned int addr_max; int ret = -EINVAL; @@ -901,8 +909,22 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, list_for_each_entry(cur, &pernet->local_addr_list, list) { if (addresses_equal(&cur->addr, &entry->addr, address_use_port(entry) && - address_use_port(cur))) - goto out; + address_use_port(cur))) { + /* allow replacing the exiting endpoint only if such + * endpoint is an implicit one and the user-space + * did not provide an endpoint id + */ + if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) + goto out; + if (entry->addr.id) + goto out; + + pernet->addrs--; + entry->addr.id = cur->addr.id; + list_del_rcu(&cur->list); + del_entry = cur; + break; + } } if (!entry->addr.id) { @@ -938,6 +960,12 @@ find_next: out: spin_unlock_bh(&pernet->lock); + + /* just replaced an existing entry, free it */ + if (del_entry) { + synchronize_rcu(); + __mptcp_pm_release_addr_entry(del_entry); + } return ret; } @@ -1036,7 +1064,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) entry->addr.id = 0; entry->addr.port = 0; entry->ifindex = 0; - entry->flags = 0; + entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; entry->lsk = NULL; ret = mptcp_pm_nl_append_new_local_addr(pernet, entry); if (ret < 0) @@ -1249,6 +1277,11 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { + GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint"); + return -EINVAL; + } + entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { GENL_SET_ERR_MSG(info, "can't allocate addr"); @@ -1333,11 +1366,12 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, } static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, - struct mptcp_addr_info *addr) + const struct mptcp_pm_addr_entry *entry) { - struct mptcp_sock *msk; - long s_slot = 0, s_num = 0; + const struct mptcp_addr_info *addr = &entry->addr; struct mptcp_rm_list list = { .nr = 0 }; + long s_slot = 0, s_num = 0; + struct mptcp_sock *msk; pr_debug("remove_id=%d", addr->id); @@ -1354,7 +1388,8 @@ static int 
mptcp_nl_remove_subflow_and_signal_addr(struct net *net, lock_sock(sk); remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); - mptcp_pm_remove_anno_addr(msk, addr, remove_subflow); + mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && + !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); if (remove_subflow) mptcp_pm_remove_subflow(msk, &list); release_sock(sk); @@ -1367,14 +1402,6 @@ next: return 0; } -/* caller must ensure the RCU grace period is already elapsed */ -static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) -{ - if (entry->lsk) - sock_release(entry->lsk); - kfree(entry); -} - static int mptcp_nl_remove_id_zero_address(struct net *net, struct mptcp_addr_info *addr) { @@ -1451,7 +1478,7 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info) __clear_bit(entry->addr.id, pernet->id_bitmap); spin_unlock_bh(&pernet->lock); - mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr); + mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); synchronize_rcu(); __mptcp_pm_release_addr_entry(entry); diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 02bab8a2d5a5..1e2e8dd9f0d6 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -1938,7 +1938,7 @@ backup_tests() run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup chk_join_nr "single address, backup" 1 1 1 chk_add_nr 1 1 - chk_prio_nr 1 0 + chk_prio_nr 1 1 # single address with port, backup reset @@ -1948,7 +1948,7 @@ backup_tests() run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup chk_join_nr "single address with port, backup" 1 1 1 chk_add_nr 1 1 - chk_prio_nr 1 0 + chk_prio_nr 1 1 } add_addr_ports_tests() -- cgit v1.2.3-71-gd317 From b530e9e1063ed2b817eae7eec6ed2daa8be11608 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Wed, 9 Mar 2022 11:53:42 +0100 Subject: bpf: Add "live packet" mode for XDP in BPF_PROG_RUN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for running XDP programs through BPF_PROG_RUN in a mode that enables live packet processing of the resulting frames. Previous uses of BPF_PROG_RUN for XDP returned the XDP program return code and the modified packet data to userspace, which is useful for unit testing of XDP programs. The existing BPF_PROG_RUN for XDP allows userspace to set the ingress ifindex and RXQ number as part of the context object being passed to the kernel. This patch reuses that code, but adds a new mode with different semantics, which can be selected with the new BPF_F_TEST_XDP_LIVE_FRAMES flag. When running BPF_PROG_RUN in this mode, the XDP program return codes will be honoured: returning XDP_PASS will result in the frame being injected into the networking stack as if it came from the selected networking interface, while returning XDP_TX and XDP_REDIRECT will result in the frame being transmitted out that interface. XDP_TX is translated into an XDP_REDIRECT operation to the same interface, since the real XDP_TX action is only possible from within the network drivers themselves, not from the process context where BPF_PROG_RUN is executed. Internally, this new mode of operation creates a page pool instance while setting up the test run, and feeds pages from that into the XDP program. The setup cost of this is amortised over the number of repetitions specified by userspace. 
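As an illustration (not part of this patch), driving the new mode from user space might look like the sketch below, assuming a libbpf and uapi headers new enough to carry the batch_size field and the new flag; the interface name and the packet buffer are placeholders:

    #include <bpf/bpf.h>
    #include <net/if.h>

    /* run an XDP prog over live frames, as if received on "eth0" */
    static int run_live(int prog_fd, void *pkt, __u32 pkt_len)
    {
            struct xdp_md ctx = {
                    .data_end = pkt_len,    /* must match data_size_in */
                    .ingress_ifindex = if_nametoindex("eth0"),
            };
            LIBBPF_OPTS(bpf_test_run_opts, opts,
                    .data_in = pkt,
                    .data_size_in = pkt_len,
                    .ctx_in = &ctx,
                    .ctx_size_in = sizeof(ctx),
                    .repeat = 1 << 20,      /* amortise the setup cost */
                    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
                    .batch_size = 64,
            );

            /* data_out/ctx_out must stay unset in this mode */
            return bpf_prog_test_run_opts(prog_fd, &opts);
    }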
To support the performance testing use case, we further optimise the setup step so that all pages in the pool are pre-initialised with the packet data, and pre-computed context and xdp_frame objects stored at the start of each page. This makes it possible to entirely avoid touching the page content on each XDP program invocation, and enables sending up to 9 Mpps/core on my test box. Because the data pages are recycled by the page pool, and the test runner doesn't re-initialise them for each run, subsequent invocations of the XDP program will see the packet data in the state it was after the last time it ran on that particular page. This means that an XDP program that modifies the packet before redirecting it has to be careful about which assumptions it makes about the packet content, but that is only an issue for the most naively written programs. Enabling the new flag is only allowed when not setting ctx_out and data_out in the test specification, since using it means frames will be redirected somewhere else, so they can't be returned. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-2-toke@redhat.com --- include/uapi/linux/bpf.h | 3 + kernel/bpf/Kconfig | 1 + kernel/bpf/syscall.c | 2 +- net/bpf/test_run.c | 334 +++++++++++++++++++++++++++++++++++++++-- tools/include/uapi/linux/bpf.h | 3 + 5 files changed, 328 insertions(+), 15 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4eebea830613..bc23020b638d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1232,6 +1232,8 @@ enum { /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ #define BPF_F_TEST_RUN_ON_CPU (1U << 0) +/* If set, XDP frames will be transmitted after processing */ +#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { @@ -1393,6 +1395,7 @@ union bpf_attr { __aligned_u64 ctx_out; __u32 flags; __u32 cpu; + __u32 batch_size; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig index c3cf0b86eeb2..d56ee177d5f8 100644 --- a/kernel/bpf/Kconfig +++ b/kernel/bpf/Kconfig @@ -30,6 +30,7 @@ config BPF_SYSCALL select TASKS_TRACE_RCU select BINARY_PRINTF select NET_SOCK_MSG if NET + select PAGE_POOL if NET default n help Enable the bpf() system call that allows to manipulate BPF programs diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index db402ebc5570..9beb585be5a6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3336,7 +3336,7 @@ static int bpf_prog_query(const union bpf_attr *attr, } } -#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu +#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size static int bpf_prog_test_run(const union bpf_attr *attr, union bpf_attr __user *uattr) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index ba410b069824..25169908be4a 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -53,10 +54,11 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t) rcu_read_unlock(); } -static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration) +static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations, + u32 repeat, int *err, u32 *duration) __must_hold(rcu) { - t->i++; + t->i += iterations; if (t->i >= repeat) { /* We're done. 
*/ t->time_spent += ktime_get_ns() - t->time_start; @@ -88,6 +90,286 @@ reset: return false; } +/* We put this struct at the head of each page with a context and frame + * initialised when the page is allocated, so we don't have to do this on each + * repetition of the test run. + */ +struct xdp_page_head { + struct xdp_buff orig_ctx; + struct xdp_buff ctx; + struct xdp_frame frm; + u8 data[]; +}; + +struct xdp_test_data { + struct xdp_buff *orig_ctx; + struct xdp_rxq_info rxq; + struct net_device *dev; + struct page_pool *pp; + struct xdp_frame **frames; + struct sk_buff **skbs; + u32 batch_size; + u32 frame_cnt; +}; + +#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head) \ + - sizeof(struct skb_shared_info)) +#define TEST_XDP_MAX_BATCH 256 + +static void xdp_test_run_init_page(struct page *page, void *arg) +{ + struct xdp_page_head *head = phys_to_virt(page_to_phys(page)); + struct xdp_buff *new_ctx, *orig_ctx; + u32 headroom = XDP_PACKET_HEADROOM; + struct xdp_test_data *xdp = arg; + size_t frm_len, meta_len; + struct xdp_frame *frm; + void *data; + + orig_ctx = xdp->orig_ctx; + frm_len = orig_ctx->data_end - orig_ctx->data_meta; + meta_len = orig_ctx->data - orig_ctx->data_meta; + headroom -= meta_len; + + new_ctx = &head->ctx; + frm = &head->frm; + data = &head->data; + memcpy(data + headroom, orig_ctx->data_meta, frm_len); + + xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq); + xdp_prepare_buff(new_ctx, data, headroom, frm_len, true); + new_ctx->data = new_ctx->data_meta + meta_len; + + xdp_update_frame_from_buff(new_ctx, frm); + frm->mem = new_ctx->rxq->mem; + + memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx)); +} + +static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx) +{ + struct xdp_mem_info mem = {}; + struct page_pool *pp; + int err = -ENOMEM; + struct page_pool_params pp_params = { + .order = 0, + .flags = 0, + .pool_size = xdp->batch_size, + .nid = NUMA_NO_NODE, + .max_len = TEST_XDP_FRAME_SIZE, + .init_callback = xdp_test_run_init_page, + .init_arg = xdp, + }; + + xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); + if (!xdp->frames) + return -ENOMEM; + + xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); + if (!xdp->skbs) + goto err_skbs; + + pp = page_pool_create(&pp_params); + if (IS_ERR(pp)) { + err = PTR_ERR(pp); + goto err_pp; + } + + /* will copy 'mem.id' into pp->xdp_mem_id */ + err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp); + if (err) + goto err_mmodel; + + xdp->pp = pp; + + /* We create a 'fake' RXQ referencing the original dev, but with an + * xdp_mem_info pointing to our page_pool + */ + xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0); + xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL; + xdp->rxq.mem.id = pp->xdp_mem_id; + xdp->dev = orig_ctx->rxq->dev; + xdp->orig_ctx = orig_ctx; + + return 0; + +err_mmodel: + page_pool_destroy(pp); +err_pp: + kfree(xdp->skbs); +err_skbs: + kfree(xdp->frames); + return err; +} + +static void xdp_test_run_teardown(struct xdp_test_data *xdp) +{ + page_pool_destroy(xdp->pp); + kfree(xdp->frames); + kfree(xdp->skbs); +} + +static bool ctx_was_changed(struct xdp_page_head *head) +{ + return head->orig_ctx.data != head->ctx.data || + head->orig_ctx.data_meta != head->ctx.data_meta || + head->orig_ctx.data_end != head->ctx.data_end; +} + +static void reset_ctx(struct xdp_page_head *head) +{ + if (likely(!ctx_was_changed(head))) + return; + + head->ctx.data = head->orig_ctx.data; + head->ctx.data_meta = 
head->orig_ctx.data_meta; + head->ctx.data_end = head->orig_ctx.data_end; + xdp_update_frame_from_buff(&head->ctx, &head->frm); +} + +static int xdp_recv_frames(struct xdp_frame **frames, int nframes, + struct sk_buff **skbs, + struct net_device *dev) +{ + gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; + int i, n; + LIST_HEAD(list); + + n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs); + if (unlikely(n == 0)) { + for (i = 0; i < nframes; i++) + xdp_return_frame(frames[i]); + return -ENOMEM; + } + + for (i = 0; i < nframes; i++) { + struct xdp_frame *xdpf = frames[i]; + struct sk_buff *skb = skbs[i]; + + skb = __xdp_build_skb_from_frame(xdpf, skb, dev); + if (!skb) { + xdp_return_frame(xdpf); + continue; + } + + list_add_tail(&skb->list, &list); + } + netif_receive_skb_list(&list); + + return 0; +} + +static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, + u32 repeat) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + int err = 0, act, ret, i, nframes = 0, batch_sz; + struct xdp_frame **frames = xdp->frames; + struct xdp_page_head *head; + struct xdp_frame *frm; + bool redirect = false; + struct xdp_buff *ctx; + struct page *page; + + batch_sz = min_t(u32, repeat, xdp->batch_size); + + local_bh_disable(); + xdp_set_return_frame_no_direct(); + + for (i = 0; i < batch_sz; i++) { + page = page_pool_dev_alloc_pages(xdp->pp); + if (!page) { + err = -ENOMEM; + goto out; + } + + head = phys_to_virt(page_to_phys(page)); + reset_ctx(head); + ctx = &head->ctx; + frm = &head->frm; + xdp->frame_cnt++; + + act = bpf_prog_run_xdp(prog, ctx); + + /* if program changed pkt bounds we need to update the xdp_frame */ + if (unlikely(ctx_was_changed(head))) { + ret = xdp_update_frame_from_buff(ctx, frm); + if (ret) { + xdp_return_buff(ctx); + continue; + } + } + + switch (act) { + case XDP_TX: + /* we can't do a real XDP_TX since we're not in the + * driver, so turn it into a REDIRECT back to the same + * index + */ + ri->tgt_index = xdp->dev->ifindex; + ri->map_id = INT_MAX; + ri->map_type = BPF_MAP_TYPE_UNSPEC; + fallthrough; + case XDP_REDIRECT: + redirect = true; + ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog); + if (ret) + xdp_return_buff(ctx); + break; + case XDP_PASS: + frames[nframes++] = frm; + break; + default: + bpf_warn_invalid_xdp_action(NULL, prog, act); + fallthrough; + case XDP_DROP: + xdp_return_buff(ctx); + break; + } + } + +out: + if (redirect) + xdp_do_flush(); + if (nframes) { + ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev); + if (ret) + err = ret; + } + + xdp_clear_return_frame_no_direct(); + local_bh_enable(); + return err; +} + +static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx, + u32 repeat, u32 batch_size, u32 *time) + +{ + struct xdp_test_data xdp = { .batch_size = batch_size }; + struct bpf_test_timer t = { .mode = NO_MIGRATE }; + int ret; + + if (!repeat) + repeat = 1; + + ret = xdp_test_run_setup(&xdp, ctx); + if (ret) + return ret; + + bpf_test_timer_enter(&t); + do { + xdp.frame_cnt = 0; + ret = xdp_test_run_batch(&xdp, prog, repeat - t.i); + if (unlikely(ret < 0)) + break; + } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time)); + bpf_test_timer_leave(&t); + + xdp_test_run_teardown(&xdp); + return ret; +} + static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *retval, u32 *time, bool xdp) { @@ -119,7 +401,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, *retval = bpf_prog_run_xdp(prog, ctx); else *retval 
= bpf_prog_run(prog, ctx); - } while (bpf_test_timer_continue(&t, repeat, &ret, time)); + } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time)); bpf_reset_run_ctx(old_ctx); bpf_test_timer_leave(&t); @@ -446,7 +728,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, int b = 2, err = -EFAULT; u32 retval = 0; - if (kattr->test.flags || kattr->test.cpu) + if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) return -EINVAL; switch (prog->expected_attach_type) { @@ -510,7 +792,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, /* doesn't support data_in/out, ctx_out, duration, or repeat */ if (kattr->test.data_in || kattr->test.data_out || kattr->test.ctx_out || kattr->test.duration || - kattr->test.repeat) + kattr->test.repeat || kattr->test.batch_size) return -EINVAL; if (ctx_size_in < prog->aux->max_ctx_offset || @@ -741,7 +1023,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret; - if (kattr->test.flags || kattr->test.cpu) + if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) return -EINVAL; data = bpf_test_init(kattr, kattr->test.data_size_in, @@ -922,7 +1204,9 @@ static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md) int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { + bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES); u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + u32 batch_size = kattr->test.batch_size; u32 size = kattr->test.data_size_in; u32 headroom = XDP_PACKET_HEADROOM; u32 retval, duration, max_data_sz; @@ -938,6 +1222,18 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, prog->expected_attach_type == BPF_XDP_CPUMAP) return -EINVAL; + if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES) + return -EINVAL; + + if (do_live) { + if (!batch_size) + batch_size = NAPI_POLL_WEIGHT; + else if (batch_size > TEST_XDP_MAX_BATCH) + return -E2BIG; + } else if (batch_size) { + return -EINVAL; + } + ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md)); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -946,14 +1242,20 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, /* There can't be user provided data before the meta data */ if (ctx->data_meta || ctx->data_end != size || ctx->data > ctx->data_end || - unlikely(xdp_metalen_invalid(ctx->data))) + unlikely(xdp_metalen_invalid(ctx->data)) || + (do_live && (kattr->test.data_out || kattr->test.ctx_out))) goto free_ctx; /* Meta data is allocated from the headroom */ headroom -= ctx->data; } max_data_sz = 4096 - headroom - tailroom; - size = min_t(u32, size, max_data_sz); + if (size > max_data_sz) { + /* disallow live data mode for jumbo frames */ + if (do_live) + goto free_ctx; + size = max_data_sz; + } data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom); if (IS_ERR(data)) { @@ -1011,7 +1313,10 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, if (repeat > 1) bpf_prog_change_xdp(NULL, prog); - ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); + if (do_live) + ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration); + else + ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true); /* We convert the xdp_buff back to an xdp_md before checking the return * code so the reference count of any held netdevice will be decremented * even if the test run failed. 
@@ -1073,7 +1378,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
 return -EINVAL;
- if (kattr->test.flags || kattr->test.cpu)
+ if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 return -EINVAL;
 if (size < ETH_HLEN)
@@ -1108,7 +1413,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 do {
 retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
 size, flags);
- } while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+ } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 bpf_test_timer_leave(&t);
 if (ret < 0)
@@ -1140,7 +1445,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
 return -EINVAL;
- if (kattr->test.flags || kattr->test.cpu)
+ if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 return -EINVAL;
 if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
@@ -1203,7 +1508,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 do {
 ctx.selected_sk = NULL;
 retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
- } while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+ } while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 bpf_test_timer_leave(&t);
 if (ret < 0)
@@ -1242,7 +1547,8 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
 /* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
 if (kattr->test.data_in || kattr->test.data_out ||
 kattr->test.ctx_out || kattr->test.duration ||
- kattr->test.repeat || kattr->test.flags)
+ kattr->test.repeat || kattr->test.flags ||
+ kattr->test.batch_size)
 return -EINVAL;
 if (ctx_size_in < prog->aux->max_ctx_offset ||

diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4eebea830613..bc23020b638d 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1232,6 +1232,8 @@ enum {
 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */
 #define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+/* If set, XDP frames will be transmitted after processing */
+#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
 /* type for BPF_ENABLE_STATS */
 enum bpf_stats_type {
@@ -1393,6 +1395,7 @@ union bpf_attr {
 __aligned_u64 ctx_out;
 __u32 flags;
 __u32 cpu;
+ __u32 batch_size;
 } test;
 struct { /* anonymous struct used by BPF_*_GET_*_ID */
-- cgit v1.2.3-71-gd317

From 530e0d46c61314c59ecfdb8d3bcb87edbc0f85d3 Mon Sep 17 00:00:00 2001
From: Oliver Hartkopp
Date: Wed, 9 Mar 2022 13:04:13 +0100
Subject: can: isotp: set default value for N_As to 50 micro seconds

The N_As value describes the time a CAN frame needs on the wire when transmitted by the CAN controller. Even very short CAN FD frames need around 100 usecs (bitrate 1Mbit/s, data bitrate 8Mbit/s).

Having N_As set to zero (the former default) leads to 'no CAN frame separation' when STmin is set to zero by the receiving node. This 'burst mode' should not be enabled by default as it could potentially dump a high number of CAN frames into the netdev queue from the soft hrtimer context. This does not affect the system stability but is just not nice and cooperative.

With this N_As/frame_txtime value the 'burst mode' is disabled by default.

As user space applications usually do not set the frame_txtime element of struct can_isotp_options, the new in-kernel default is very likely overwritten with zero when the sockopt() CAN_ISOTP_OPTS is invoked.
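For illustration (not part of the patch): an application that cares about N_As would set frame_txtime explicitly while configuring the socket, before bind(), since the option is rejected on a bound socket. A minimal read-modify-write sketch:

    #include <sys/socket.h>
    #include <linux/can/isotp.h>

    /* set N_As (in nanoseconds) on an unbound isotp socket */
    static int set_frame_txtime(int sock, __u32 txtime_ns)
    {
            struct can_isotp_options opts;
            socklen_t len = sizeof(opts);

            if (getsockopt(sock, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, &len) < 0)
                    return -1;
            /* 0 now means "keep the current value"; a true zero needs
             * CAN_ISOTP_FRAME_TXTIME_ZERO instead */
            opts.frame_txtime = txtime_ns;
            return setsockopt(sock, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
                              &opts, sizeof(opts));
    }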
To make sure that an N_As value of zero is only set intentionally, the value '0' is now interpreted as 'do not change the current value'. When a frame_txtime of zero is required for testing purposes, this CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime.

Link: https://lore.kernel.org/all/20220309120416.83514-2-socketcan@hartkopp.net
Signed-off-by: Oliver Hartkopp
Signed-off-by: Marc Kleine-Budde
---
 include/uapi/linux/can/isotp.h | 28 ++++++++++++++++++++++------
 net/can/isotp.c | 12 +++++++++++-
 2 files changed, 33 insertions(+), 7 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
index c55935b64ccc..590f8aea2b6d 100644
--- a/include/uapi/linux/can/isotp.h
+++ b/include/uapi/linux/can/isotp.h
@@ -137,20 +137,16 @@ struct can_isotp_ll_options {
 #define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */
 #define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */
-/* default values */
+/* protocol machine default values */
 #define CAN_ISOTP_DEFAULT_FLAGS 0
 #define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00
 #define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */
-#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 0
+#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 micro seconds */
 #define CAN_ISOTP_DEFAULT_RECV_BS 0
 #define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00
 #define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0
-#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU
-#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN
-#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0
-
 /*
 * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
 *
@@ -162,4 +158,24 @@ struct can_isotp_ll_options {
 * consistency and copied directly into the flow control (FC) frame.
 */

+/* link layer default values => make use of Classical CAN frames */
+
+#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU
+#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN
+#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0
+
+/*
+ * The CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value as
+ * it only makes sense for isotp implementation tests to run without
+ * a N_As value. As user space applications usually do not set the
+ * frame_txtime element of struct can_isotp_options the new in-kernel
+ * default is very likely overwritten with zero when the sockopt()
+ * CAN_ISOTP_OPTS is invoked.
+ * To make sure that a N_As value of zero is only set intentional the
+ * value '0' is now interpreted as 'do not change the current value'.
+ * When a frame_txtime of zero is required for testing purposes this
+ * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime.
+ */
+#define CAN_ISOTP_FRAME_TXTIME_ZERO 0xFFFFFFFF
+
 #endif /* !_UAPI_CAN_ISOTP_H */

diff --git a/net/can/isotp.c b/net/can/isotp.c
index d59f1758ac9c..47404ba59981 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -140,6 +140,7 @@ struct isotp_sock {
 struct can_isotp_options opt;
 struct can_isotp_fc_options rxfc, txfc;
 struct can_isotp_ll_options ll;
+ u32 frame_txtime;
 u32 force_tx_stmin;
 u32 force_rx_stmin;
 u32 cfecho; /* consecutive frame echo tag */
@@ -360,7 +361,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
 so->tx_gap = ktime_set(0, 0);
 /* add transmission time for CAN frame N_As */
- so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime);
+ so->tx_gap = ktime_add_ns(so->tx_gap, so->frame_txtime);
 /* add waiting time for consecutive frames N_Cs */
 if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
 so->tx_gap = ktime_add_ns(so->tx_gap,
@@ -1293,6 +1294,14 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
 /* no separate rx_ext_address is given => use ext_address */
 if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
 so->opt.rx_ext_address = so->opt.ext_address;
+
+ /* check for frame_txtime changes (0 => no changes) */
+ if (so->opt.frame_txtime) {
+ if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO)
+ so->frame_txtime = 0;
+ else
+ so->frame_txtime = so->opt.frame_txtime;
+ }
 break;
 case CAN_ISOTP_RECV_FC:
@@ -1498,6 +1507,7 @@ static int isotp_init(struct sock *sk)
 so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
 so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
 so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
+ so->frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
 so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS;
 so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN;
 so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX;
-- cgit v1.2.3-71-gd317

From e7a6c00dc77aedf27a601738ea509f1caea6d673 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 4 Mar 2022 08:22:22 -0700
Subject: io_uring: add support for registering ring file descriptors

Lots of workloads use multiple threads, in which case the file table is shared between them. This makes getting and putting the ring file descriptor for each io_uring_enter(2) system call more expensive, as it involves an atomic get and put for each call.

Similarly to how we allow registering normal file descriptors to avoid this overhead, add support for an io_uring_register(2) API that allows registering the ring fds themselves:

1) IORING_REGISTER_RING_FDS - takes an array of io_uring_rsrc_update structs, and registers them with the task.

2) IORING_UNREGISTER_RING_FDS - takes an array of io_uring_rsrc_update structs, and unregisters them.

When a ring fd is registered, it is internally represented by an offset. This offset is returned to the application, and the application then uses this offset and sets IORING_ENTER_REGISTERED_RING for the io_uring_enter(2) system call. This works just like using a registered file descriptor, rather than a real one, in an SQE, where IOSQE_FIXED_FILE gets set to tell io_uring that we're using an internal offset/descriptor rather than a real file descriptor.

In initial testing, this provides a nice bump in performance for threaded applications in real world cases where the batch count (eg number of requests submitted per io_uring_enter(2) invocation) is low.
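For illustration (not part of the patch): libc has no wrappers for these system calls, so a raw-syscall sketch of the intended usage, assuming the __NR_* numbers are exposed by the toolchain, might look like this:

    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/io_uring.h>

    /* register ring_fd, letting the kernel pick a free slot */
    static int register_ring_fd(int ring_fd)
    {
            struct io_uring_rsrc_update reg;

            memset(&reg, 0, sizeof(reg));
            reg.offset = -1U;               /* "any free index" */
            reg.data = ring_fd;

            if (syscall(__NR_io_uring_register, ring_fd,
                        IORING_REGISTER_RING_FDS, &reg, 1) != 1)
                    return -1;
            return reg.offset;      /* use instead of ring_fd from now on */
    }

    /* enter via the registered offset, skipping the per-call fdget/fdput */
    static int enter_registered(int reg_offset, unsigned int to_submit)
    {
            return syscall(__NR_io_uring_enter, reg_offset, to_submit, 0,
                           IORING_ENTER_REGISTERED_RING, NULL, 0);
    }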
In a microbenchmark, submitting NOP requests, we see the following increases in performance:

Requests per syscall    Baseline        Registered      Increase
----------------------------------------------------------------
1                        ~7030K          ~8080K          +15%
2                       ~13120K         ~14800K          +13%
4                       ~22740K         ~25300K          +11%

Co-developed-by: Xiaoguang Wang
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 182 ++++++++++++++++++++++++++++++++++++++++--
 include/linux/io_uring.h | 5 +-
 include/uapi/linux/io_uring.h | 13 ++-
 3 files changed, 190 insertions(+), 10 deletions(-)
(limited to 'include/uapi/linux')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f8fd3b2bb30d..daa3609b742f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -466,6 +466,11 @@ struct io_ring_ctx {
 };
 };

+/*
+ * Arbitrary limit, can be raised if need be
+ */
+#define IO_RINGFD_REG_MAX 16
+
 struct io_uring_task {
 /* submission side */
 int cached_refs;
@@ -481,6 +486,7 @@ struct io_uring_task {
 struct io_wq_work_list task_list;
 struct io_wq_work_list prior_task_list;
 struct callback_head task_work;
+ struct file **registered_rings;
 bool task_running;
 };

@@ -8788,8 +8794,16 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
 if (unlikely(!tctx))
 return -ENOMEM;

+ tctx->registered_rings = kcalloc(IO_RINGFD_REG_MAX,
+ sizeof(struct file *), GFP_KERNEL);
+ if (unlikely(!tctx->registered_rings)) {
+ kfree(tctx);
+ return -ENOMEM;
+ }
+
 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
 if (unlikely(ret)) {
+ kfree(tctx->registered_rings);
 kfree(tctx);
 return ret;
 }
@@ -8798,6 +8812,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
 if (IS_ERR(tctx->io_wq)) {
 ret = PTR_ERR(tctx->io_wq);
 percpu_counter_destroy(&tctx->inflight);
+ kfree(tctx->registered_rings);
 kfree(tctx);
 return ret;
 }
@@ -8822,6 +8837,7 @@ void __io_uring_free(struct task_struct *tsk)
 WARN_ON_ONCE(tctx->io_wq);
 WARN_ON_ONCE(tctx->cached_refs);

+ kfree(tctx->registered_rings);
 percpu_counter_destroy(&tctx->inflight);
 kfree(tctx);
 tsk->io_uring = NULL;
@@ -10043,6 +10059,139 @@ void __io_uring_cancel(bool cancel_all)
 io_uring_cancel_generic(cancel_all, NULL);
 }

+void io_uring_unreg_ringfd(void)
+{
+ struct io_uring_task *tctx = current->io_uring;
+ int i;
+
+ for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
+ if (tctx->registered_rings[i]) {
+ fput(tctx->registered_rings[i]);
+ tctx->registered_rings[i] = NULL;
+ }
+ }
+}
+
+static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
+ int start, int end)
+{
+ struct file *file;
+ int offset;
+
+ for (offset = start; offset < end; offset++) {
+ offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
+ if (tctx->registered_rings[offset])
+ continue;
+
+ file = fget(fd);
+ if (!file) {
+ return -EBADF;
+ } else if (file->f_op != &io_uring_fops) {
+ fput(file);
+ return -EOPNOTSUPP;
+ }
+ tctx->registered_rings[offset] = file;
+ return offset;
+ }
+
+ return -EBUSY;
+}
+
+/*
+ * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
+ * invocation. User passes in an array of struct io_uring_rsrc_update
+ * with ->data set to the ring_fd, and ->offset given for the desired
+ * index. If no index is desired, application may set ->offset == -1U
+ * and we'll find an available index. Returns number of entries
+ * successfully processed, or < 0 on error if none were processed.
+ */ +static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg, + unsigned nr_args) +{ + struct io_uring_rsrc_update __user *arg = __arg; + struct io_uring_rsrc_update reg; + struct io_uring_task *tctx; + int ret, i; + + if (!nr_args || nr_args > IO_RINGFD_REG_MAX) + return -EINVAL; + + mutex_unlock(&ctx->uring_lock); + ret = io_uring_add_tctx_node(ctx); + mutex_lock(&ctx->uring_lock); + if (ret) + return ret; + + tctx = current->io_uring; + for (i = 0; i < nr_args; i++) { + int start, end; + + if (copy_from_user(®, &arg[i], sizeof(reg))) { + ret = -EFAULT; + break; + } + + if (reg.offset == -1U) { + start = 0; + end = IO_RINGFD_REG_MAX; + } else { + if (reg.offset >= IO_RINGFD_REG_MAX) { + ret = -EINVAL; + break; + } + start = reg.offset; + end = start + 1; + } + + ret = io_ring_add_registered_fd(tctx, reg.data, start, end); + if (ret < 0) + break; + + reg.offset = ret; + if (copy_to_user(&arg[i], ®, sizeof(reg))) { + fput(tctx->registered_rings[reg.offset]); + tctx->registered_rings[reg.offset] = NULL; + ret = -EFAULT; + break; + } + } + + return i ? i : ret; +} + +static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg, + unsigned nr_args) +{ + struct io_uring_rsrc_update __user *arg = __arg; + struct io_uring_task *tctx = current->io_uring; + struct io_uring_rsrc_update reg; + int ret = 0, i; + + if (!nr_args || nr_args > IO_RINGFD_REG_MAX) + return -EINVAL; + if (!tctx) + return 0; + + for (i = 0; i < nr_args; i++) { + if (copy_from_user(®, &arg[i], sizeof(reg))) { + ret = -EFAULT; + break; + } + if (reg.offset >= IO_RINGFD_REG_MAX) { + ret = -EINVAL; + break; + } + + reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX); + if (tctx->registered_rings[reg.offset]) { + fput(tctx->registered_rings[reg.offset]); + tctx->registered_rings[reg.offset] = NULL; + } + } + + return i ? i : ret; +} + static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff, size_t sz) { @@ -10173,12 +10322,28 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, io_run_task_work(); if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | - IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))) + IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | + IORING_ENTER_REGISTERED_RING))) return -EINVAL; - f = fdget(fd); - if (unlikely(!f.file)) - return -EBADF; + /* + * Ring fd has been registered via IORING_REGISTER_RING_FDS, we + * need only dereference our task private array to find it. + */ + if (flags & IORING_ENTER_REGISTERED_RING) { + struct io_uring_task *tctx = current->io_uring; + + if (!tctx || fd >= IO_RINGFD_REG_MAX) + return -EINVAL; + fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); + f.file = tctx->registered_rings[fd]; + if (unlikely(!f.file)) + return -EBADF; + } else { + f = fdget(fd); + if (unlikely(!f.file)) + return -EBADF; + } ret = -EOPNOTSUPP; if (unlikely(f.file->f_op != &io_uring_fops)) @@ -10252,7 +10417,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, out: percpu_ref_put(&ctx->refs); out_fput: - fdput(f); + if (!(flags & IORING_ENTER_REGISTERED_RING)) + fdput(f); return submitted ? 
submitted : ret;
 }

@@ -11142,6 +11308,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_register_iowq_max_workers(ctx, arg);
 		break;
+	case IORING_REGISTER_RING_FDS:
+		ret = io_ringfd_register(ctx, arg, nr_args);
+		break;
+	case IORING_UNREGISTER_RING_FDS:
+		ret = io_ringfd_unregister(ctx, arg, nr_args);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 649a4d7c241b..1814e698d861 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -9,11 +9,14 @@ struct sock *io_uring_get_socket(struct file *file);
 void __io_uring_cancel(bool cancel_all);
 void __io_uring_free(struct task_struct *tsk);
+void io_uring_unreg_ringfd(void);

 static inline void io_uring_files_cancel(void)
 {
-	if (current->io_uring)
+	if (current->io_uring) {
+		io_uring_unreg_ringfd();
 		__io_uring_cancel(false);
+	}
 }
 static inline void io_uring_task_cancel(void)
 {
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 787f491f0d2a..42b2fe84dbcd 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -257,10 +257,11 @@ struct io_cqring_offsets {
 /*
  * io_uring_enter(2) flags
  */
-#define IORING_ENTER_GETEVENTS	(1U << 0)
-#define IORING_ENTER_SQ_WAKEUP	(1U << 1)
-#define IORING_ENTER_SQ_WAIT	(1U << 2)
-#define IORING_ENTER_EXT_ARG	(1U << 3)
+#define IORING_ENTER_GETEVENTS		(1U << 0)
+#define IORING_ENTER_SQ_WAKEUP		(1U << 1)
+#define IORING_ENTER_SQ_WAIT		(1U << 2)
+#define IORING_ENTER_EXT_ARG		(1U << 3)
+#define IORING_ENTER_REGISTERED_RING	(1U << 4)

 /*
  * Passed in for io_uring_setup(2). Copied back with updated info on success
@@ -325,6 +326,10 @@ enum {
 	/* set/get max number of io-wq workers */
 	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

+	/* register/unregister io_uring fd with the ring */
+	IORING_REGISTER_RING_FDS		= 20,
+	IORING_UNREGISTER_RING_FDS		= 21,
+
 	/* this goes last */
 	IORING_REGISTER_LAST
 };
-- cgit v1.2.3-71-gd317

From 4f57f06ce2186c31c3da52386125dc57b1cd6f96 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 10 Mar 2022 06:27:26 -0700
Subject: io_uring: add support for IORING_OP_MSG_RING command

This adds support for IORING_OP_MSG_RING, which allows an SQE to signal
another ring. That allows either waking up someone waiting on the ring,
or even passing a 64-bit value via the user_data field in the CQE.

sqe->fd must contain the fd of a ring that should receive the CQE.
sqe->off will be propagated to the cqe->user_data on the target ring,
and sqe->len will be propagated to cqe->res. The resulting CQE will
have IORING_CQE_F_MSG set in its flags, to indicate that this CQE was
generated from a messaging request rather than a SQE issued locally on
that ring. This effectively allows passing a 64-bit and a 32-bit
quantity between the two rings.

This request type has the following request specific error cases:

- -EBADFD. Set if the sqe->fd doesn't point to a file descriptor that is
  of the io_uring type.
- -EOVERFLOW. Set if we were not able to deliver a request to the target
  ring.
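As a rough illustration of the userspace side, the sketch below fills an
SQE by hand per the field mapping above. It is a hedged example, not part
of this patch: the helper name send_ring_msg is invented, and liburing is
assumed only for ring setup and submission, built against these updated
headers.

  #include <string.h>
  #include <liburing.h>

  /* Post a message CQE on another ring. The target ring sees a CQE with
   * cqe->user_data == user_data, cqe->res == res and IORING_CQE_F_MSG
   * set in cqe->flags. */
  static int send_ring_msg(struct io_uring *src, int target_ring_fd,
                           __u64 user_data, __u32 res)
  {
          struct io_uring_sqe *sqe = io_uring_get_sqe(src);

          if (!sqe)
                  return -EBUSY;          /* submission queue full */
          memset(sqe, 0, sizeof(*sqe));
          sqe->opcode = IORING_OP_MSG_RING;
          sqe->fd = target_ring_fd;       /* must be an io_uring fd, else -EBADFD */
          sqe->off = user_data;           /* propagated to cqe->user_data */
          sqe->len = res;                 /* propagated to cqe->res */
          sqe->user_data = 0x1234;        /* tags the local completion */
          return io_uring_submit(src);
  }

The local ring still receives its own completion for this request, with
res 0 on success or one of the errors listed above.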
Signed-off-by: Jens Axboe --- fs/io_uring.c | 55 +++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/io_uring.h | 3 +++ 2 files changed, 58 insertions(+) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 0f5e999e569f..36b001365a79 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -706,6 +706,12 @@ struct io_hardlink { int flags; }; +struct io_msg { + struct file *file; + u64 user_data; + u32 len; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -871,6 +877,7 @@ struct io_kiocb { struct io_mkdir mkdir; struct io_symlink symlink; struct io_hardlink hardlink; + struct io_msg msg; }; u8 opcode; @@ -1121,6 +1128,9 @@ static const struct io_op_def io_op_defs[] = { [IORING_OP_MKDIRAT] = {}, [IORING_OP_SYMLINKAT] = {}, [IORING_OP_LINKAT] = {}, + [IORING_OP_MSG_RING] = { + .needs_file = 1, + }, }; /* requests with any of those set should undergo io_disarm_next() */ @@ -4322,6 +4332,46 @@ static int io_nop(struct io_kiocb *req, unsigned int issue_flags) return 0; } +static int io_msg_ring_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index || + sqe->rw_flags || sqe->splice_fd_in || sqe->buf_index || + sqe->personality)) + return -EINVAL; + + if (req->file->f_op != &io_uring_fops) + return -EBADFD; + + req->msg.user_data = READ_ONCE(sqe->off); + req->msg.len = READ_ONCE(sqe->len); + return 0; +} + +static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_ring_ctx *target_ctx; + struct io_msg *msg = &req->msg; + int ret = -EOVERFLOW; + bool filled; + + target_ctx = req->file->private_data; + + spin_lock(&target_ctx->completion_lock); + filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, + IORING_CQE_F_MSG); + io_commit_cqring(target_ctx); + spin_unlock(&target_ctx->completion_lock); + + if (filled) { + io_cqring_ev_posted(target_ctx); + ret = 0; + } + + __io_req_complete(req, issue_flags, ret, 0); + return 0; +} + static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; @@ -6700,6 +6750,8 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_symlinkat_prep(req, sqe); case IORING_OP_LINKAT: return io_linkat_prep(req, sqe); + case IORING_OP_MSG_RING: + return io_msg_ring_prep(req, sqe); } printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", @@ -6983,6 +7035,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) case IORING_OP_LINKAT: ret = io_linkat(req, issue_flags); break; + case IORING_OP_MSG_RING: + ret = io_msg_ring(req, issue_flags); + break; default: ret = -EINVAL; break; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 42b2fe84dbcd..8bd4bfdd9a89 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -143,6 +143,7 @@ enum { IORING_OP_MKDIRAT, IORING_OP_SYMLINKAT, IORING_OP_LINKAT, + IORING_OP_MSG_RING, /* this goes last, obviously */ IORING_OP_LAST, @@ -199,9 +200,11 @@ struct io_uring_cqe { * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries + * IORING_CQE_F_MSG If set, CQE was generated with IORING_OP_MSG_RING */ #define IORING_CQE_F_BUFFER (1U << 0) #define IORING_CQE_F_MORE (1U << 1) +#define IORING_CQE_F_MSG (1U << 2) enum { IORING_CQE_BUFFER_SHIFT = 16, -- cgit v1.2.3-71-gd317 From 153474ba1a4aed0a7b797b4c2be8c35c7a4e57bd Mon Sep 17 00:00:00 2001 From: 
"Eric W. Biederman" Date: Thu, 27 Jan 2022 11:46:37 -0600 Subject: ptrace: Create ptrace_report_syscall_{entry,exit} in ptrace.h Rename tracehook_report_syscall_{entry,exit} to ptrace_report_syscall_{entry,exit} and place them in ptrace.h There is no longer any generic tracehook infractructure so make these ptrace specific functions ptrace specific. Reviewed-by: Kees Cook Link: https://lkml.kernel.org/r/20220309162454.123006-3-ebiederm@xmission.com Signed-off-by: "Eric W. Biederman" --- arch/Kconfig | 2 +- arch/alpha/kernel/ptrace.c | 5 ++-- arch/arc/kernel/ptrace.c | 5 ++-- arch/arm/kernel/ptrace.c | 5 ++-- arch/arm64/kernel/ptrace.c | 7 +++-- arch/csky/kernel/ptrace.c | 5 ++-- arch/h8300/kernel/ptrace.c | 5 ++-- arch/hexagon/kernel/traps.c | 6 ++--- arch/ia64/kernel/ptrace.c | 4 +-- arch/m68k/kernel/ptrace.c | 6 ++--- arch/microblaze/kernel/ptrace.c | 5 ++-- arch/mips/kernel/ptrace.c | 5 ++-- arch/nds32/include/asm/syscall.h | 2 +- arch/nds32/kernel/ptrace.c | 5 ++-- arch/nios2/kernel/ptrace.c | 5 ++-- arch/openrisc/kernel/ptrace.c | 5 ++-- arch/parisc/kernel/ptrace.c | 7 +++-- arch/powerpc/kernel/ptrace/ptrace.c | 8 +++--- arch/riscv/kernel/ptrace.c | 5 ++-- arch/sh/kernel/ptrace_32.c | 5 ++-- arch/sparc/kernel/ptrace_32.c | 5 ++-- arch/sparc/kernel/ptrace_64.c | 5 ++-- arch/um/kernel/ptrace.c | 5 ++-- arch/xtensa/kernel/ptrace.c | 5 ++-- include/asm-generic/syscall.h | 2 +- include/linux/entry-common.h | 6 ++--- include/linux/ptrace.h | 51 +++++++++++++++++++++++++++++++++++++ include/linux/tracehook.h | 51 ------------------------------------- include/uapi/linux/ptrace.h | 2 +- kernel/entry/common.c | 1 + 30 files changed, 109 insertions(+), 126 deletions(-) (limited to 'include/uapi/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 678a80713b21..a517a949eb1d 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -217,7 +217,7 @@ config TRACE_IRQFLAGS_SUPPORT # asm/syscall.h supplying asm-generic/syscall.h interface # linux/regset.h user_regset interfaces # CORE_DUMP_USE_REGSET #define'd in linux/elf.h -# TIF_SYSCALL_TRACE calls tracehook_report_syscall_{entry,exit} +# TIF_SYSCALL_TRACE calls ptrace_report_syscall_{entry,exit} # TIF_NOTIFY_RESUME calls tracehook_notify_resume() # signal delivery calls tracehook_signal_handler() # diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index 8c43212ae38e..a1a239ea002d 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -323,7 +322,7 @@ asmlinkage unsigned long syscall_trace_enter(void) unsigned long ret = 0; struct pt_regs *regs = current_pt_regs(); if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(current_pt_regs())) + ptrace_report_syscall_entry(current_pt_regs())) ret = -1UL; audit_syscall_entry(regs->r0, regs->r16, regs->r17, regs->r18, regs->r19); return ret ?: current_pt_regs()->r0; @@ -334,5 +333,5 @@ syscall_trace_leave(void) { audit_syscall_exit(current_pt_regs()); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(current_pt_regs(), 0); + ptrace_report_syscall_exit(current_pt_regs(), 0); } diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index 883391977fdf..54b419ac8bda 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include @@ -258,7 +257,7 @@ long arch_ptrace(struct task_struct *child, long request, asmlinkage int syscall_trace_entry(struct pt_regs *regs) { - if 
(tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return ULONG_MAX; return regs->r8; @@ -266,5 +265,5 @@ asmlinkage int syscall_trace_entry(struct pt_regs *regs) asmlinkage void syscall_trace_exit(struct pt_regs *regs) { - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index e5aa3237853d..bfe88c6e60d5 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -843,8 +842,8 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) regs->ARM_ip = dir; if (dir == PTRACE_SYSCALL_EXIT) - tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_exit(regs, 0); + else if (ptrace_report_syscall_entry(regs)) current_thread_info()->abi_syscall = -1; regs->ARM_ip = ip; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index b7845575f86f..230a47b9189e 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -1818,11 +1817,11 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) regs->regs[regno] = dir; if (dir == PTRACE_SYSCALL_ENTER) { - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) forget_syscall(regs); regs->regs[regno] = saved_reg; } else if (!test_thread_flag(TIF_SINGLESTEP)) { - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); regs->regs[regno] = saved_reg; } else { regs->regs[regno] = saved_reg; @@ -1832,7 +1831,7 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) * tracer modifications to the registers may have rewound the * state machine. */ - tracehook_report_syscall_exit(regs, 1); + ptrace_report_syscall_exit(regs, 1); } } diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 1a5f54e0d272..0f7e7b653c72 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -321,7 +320,7 @@ long arch_ptrace(struct task_struct *child, long request, asmlinkage int syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return -1; if (secure_computing() == -1) @@ -339,7 +338,7 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(regs); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, syscall_get_return_value(current, regs)); diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c index a11db009d0ea..a9898b27b756 100644 --- a/arch/h8300/kernel/ptrace.c +++ b/arch/h8300/kernel/ptrace.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include @@ -174,7 +173,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. 
* We'll return a bogus call number to get an ENOSYS @@ -196,5 +195,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 1240f038cce0..6447763ce5a9 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -348,7 +348,7 @@ void do_trap0(struct pt_regs *regs) /* allow strace to catch syscall args */ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs))) + ptrace_report_syscall_entry(regs))) return; /* return -ENOSYS somewhere? */ /* Interrupts should be re-enabled for syscall processing */ @@ -386,7 +386,7 @@ void do_trap0(struct pt_regs *regs) /* allow strace to get the syscall return state */ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE))) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); break; case TRAP_DEBUG: diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 6a1439eaa050..6af64aae087d 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -1217,7 +1217,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, struct pt_regs regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (tracehook_report_syscall_entry(®s)) + if (ptrace_report_syscall_entry(®s)) return -ENOSYS; /* copy user rbs to kernel rbs */ @@ -1243,7 +1243,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(®s, step); + ptrace_report_syscall_exit(®s, step); /* copy user rbs to kernel rbs */ if (test_thread_flag(TIF_RESTORE_RSE)) diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index aa3a0b8d07e9..a0c99fe3118e 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include @@ -282,13 +282,13 @@ asmlinkage int syscall_trace_enter(void) int ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE)) - ret = tracehook_report_syscall_entry(task_pt_regs(current)); + ret = ptrace_report_syscall_entry(task_pt_regs(current)); return ret; } asmlinkage void syscall_trace_leave(void) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(task_pt_regs(current), 0); + ptrace_report_syscall_exit(task_pt_regs(current), 0); } #endif /* CONFIG_COLDFIRE */ diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c index badd286882ae..5234d0c1dcaa 100644 --- a/arch/microblaze/kernel/ptrace.c +++ b/arch/microblaze/kernel/ptrace.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include @@ -140,7 +139,7 @@ asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs) secure_computing_strict(regs->r12); if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. 
* We'll return a bogus call number to get an ENOSYS @@ -161,7 +160,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } void ptrace_disable(struct task_struct *child) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index db7c5be1d4a3..567aec4abac0 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -1317,7 +1316,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) current_thread_info()->syscall = syscall; if (test_thread_flag(TIF_SYSCALL_TRACE)) { - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return -1; syscall = current_thread_info()->syscall; } @@ -1376,7 +1375,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); user_enter(); } diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h index 90aa56c94af1..04d55ce18d50 100644 --- a/arch/nds32/include/asm/syscall.h +++ b/arch/nds32/include/asm/syscall.h @@ -39,7 +39,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) * * It's only valid to call this when @task is stopped for system * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT), - * after tracehook_report_syscall_entry() returned nonzero to prevent + * after ptrace_report_syscall_entry() returned nonzero to prevent * the system call from taking place. 
* * This rolls back the register state in @regs so it's as if the diff --git a/arch/nds32/kernel/ptrace.c b/arch/nds32/kernel/ptrace.c index d0eda870fbc2..6a6988cf689d 100644 --- a/arch/nds32/kernel/ptrace.c +++ b/arch/nds32/kernel/ptrace.c @@ -3,7 +3,6 @@ #include #include -#include #include #include @@ -103,7 +102,7 @@ void user_disable_single_step(struct task_struct *child) asmlinkage int syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) { - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) forget_syscall(regs); } return regs->syscallno; @@ -113,6 +112,6 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) { int step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/nios2/kernel/ptrace.c b/arch/nios2/kernel/ptrace.c index a6ea9e1b4f61..cd62f310778b 100644 --- a/arch/nios2/kernel/ptrace.c +++ b/arch/nios2/kernel/ptrace.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include @@ -134,7 +133,7 @@ asmlinkage int do_syscall_trace_enter(void) int ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE)) - ret = tracehook_report_syscall_entry(task_pt_regs(current)); + ret = ptrace_report_syscall_entry(task_pt_regs(current)); return ret; } @@ -142,5 +141,5 @@ asmlinkage int do_syscall_trace_enter(void) asmlinkage void do_syscall_trace_exit(void) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(task_pt_regs(current), 0); + ptrace_report_syscall_exit(task_pt_regs(current), 0); } diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 4d60ae2a12fa..b971740fc2aa 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -159,7 +158,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + ptrace_report_syscall_entry(regs)) /* * Tracing decided this syscall should not happen. * We'll return a bogus call number to get an ENOSYS @@ -181,5 +180,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 65de6c4c9354..96ef6a6b66e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -316,7 +315,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) { - int rc = tracehook_report_syscall_entry(regs); + int rc = ptrace_report_syscall_entry(regs); /* * As tracesys_next does not set %r28 to -ENOSYS @@ -327,7 +326,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) if (rc) { /* * A nonzero return code from - * tracehook_report_syscall_entry() tells us + * ptrace_report_syscall_entry() tells us * to prevent the syscall execution. Skip * the syscall call and the syscall restart handling. 
* @@ -381,7 +380,7 @@ void do_syscall_trace_exit(struct pt_regs *regs) #endif if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, stepping); + ptrace_report_syscall_exit(regs, stepping); } diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index c43f77e2ac31..f394b0d6473f 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -16,7 +16,7 @@ */ #include -#include +#include #include #include #include @@ -263,12 +263,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE); if (flags) { - int rc = tracehook_report_syscall_entry(regs); + int rc = ptrace_report_syscall_entry(regs); if (unlikely(flags & _TIF_SYSCALL_EMU)) { /* * A nonzero return code from - * tracehook_report_syscall_entry() tells us to prevent + * ptrace_report_syscall_entry() tells us to prevent * the syscall execution, but we are not going to * execute it anyway. * @@ -334,7 +334,7 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } void __init pt_regs_check(void); diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index a89243730153..793c7da0554b 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -17,7 +17,6 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include @@ -241,7 +240,7 @@ long arch_ptrace(struct task_struct *child, long request, __visible int do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) - if (tracehook_report_syscall_entry(regs)) + if (ptrace_report_syscall_entry(regs)) return -1; /* @@ -266,7 +265,7 @@ __visible void do_syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(regs); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 5281685f6ad1..d417988d9770 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -456,7 +455,7 @@ long arch_ptrace(struct task_struct *child, long request, asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + ptrace_report_syscall_entry(regs)) { regs->regs[0] = -ENOSYS; return -1; } @@ -484,5 +483,5 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 5318174a0268..e7db48acb838 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -439,9 +438,9 @@ asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p) if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (syscall_exit_p) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); else - ret = tracehook_report_syscall_entry(regs); + ret = 
ptrace_report_syscall_entry(regs); } return ret; diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 2b92155db8a5..86a7eb5c27ba 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -1095,7 +1094,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) user_exit(); if (test_thread_flag(TIF_SYSCALL_TRACE)) - ret = tracehook_report_syscall_entry(regs); + ret = ptrace_report_syscall_entry(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->u_regs[UREG_G1]); @@ -1118,7 +1117,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) trace_sys_exit(regs, regs->u_regs[UREG_I0]); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); if (test_thread_flag(TIF_NOHZ)) user_enter(); diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index b425f47bddbb..bfaf6ab1ac03 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -135,7 +134,7 @@ int syscall_trace_enter(struct pt_regs *regs) if (!test_thread_flag(TIF_SYSCALL_TRACE)) return 0; - return tracehook_report_syscall_entry(regs); + return ptrace_report_syscall_entry(regs); } void syscall_trace_leave(struct pt_regs *regs) @@ -151,7 +150,7 @@ void syscall_trace_leave(struct pt_regs *regs) if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - tracehook_report_syscall_exit(regs, 0); + ptrace_report_syscall_exit(regs, 0); /* force do_signal() --> is_syscall() */ if (ptraced & PT_PTRACED) set_thread_flag(TIF_SIGPENDING); diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index bb3f4797d212..323c678a691f 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #define CREATE_TRACE_POINTS @@ -550,7 +549,7 @@ int do_syscall_trace_enter(struct pt_regs *regs) regs->areg[2] = -ENOSYS; if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + ptrace_report_syscall_entry(regs)) { regs->areg[2] = -ENOSYS; regs->syscall = NO_SYSCALL; return 0; @@ -583,5 +582,5 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 81695eb02a12..5a80fe728dc8 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -44,7 +44,7 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); * * It's only valid to call this when @task is stopped for system * call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_AUDIT), after tracehook_report_syscall_entry() + * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry() * returned nonzero to prevent the system call from taking place. 
* * This rolls back the register state in @regs so it's as if the diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 2e2b8d6140ed..a670e9fba7a9 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -3,7 +3,7 @@ #define __LINUX_ENTRYCOMMON_H #include -#include +#include #include #include #include @@ -95,7 +95,7 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs #ifndef arch_syscall_enter_tracehook static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs) { - return tracehook_report_syscall_entry(regs); + return ptrace_report_syscall_entry(regs); } #endif @@ -294,7 +294,7 @@ static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step); #ifndef arch_syscall_exit_tracehook static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) { - tracehook_report_syscall_exit(regs, step); + ptrace_report_syscall_exit(regs, step); } #endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 91b1074edb4c..5310f43e4762 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -440,4 +440,55 @@ static inline int ptrace_report_syscall(unsigned long message) current->ptrace_message = 0; return fatal_signal_pending(current); } + +/** + * ptrace_report_syscall_entry - task is about to attempt a system call + * @regs: user register state of current task + * + * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or + * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just + * entered the kernel for a system call. Full user register state is + * available here. Changing the values in @regs can affect the system + * call number and arguments to be tried. It is safe to block here, + * preventing the system call from beginning. + * + * Returns zero normally, or nonzero if the calling arch code should abort + * the system call. That must prevent normal entry so no system call is + * made. If @task ever returns to user mode after this, its register state + * is unspecified, but should be something harmless like an %ENOSYS error + * return. It should preserve enough information so that syscall_rollback() + * can work (see asm-generic/syscall.h). + * + * Called without locks, just after entering kernel mode. + */ +static inline __must_check int ptrace_report_syscall_entry( + struct pt_regs *regs) +{ + return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY); +} + +/** + * ptrace_report_syscall_exit - task has just finished a system call + * @regs: user register state of current task + * @step: nonzero if simulating single-step or block-step + * + * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when + * the current task has just finished an attempted system call. Full + * user register state is available here. It is safe to block here, + * preventing signals from being processed. + * + * If @step is nonzero, this report is also in lieu of the normal + * trap that would follow the system call instruction because + * user_enable_block_step() or user_enable_single_step() was used. + * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. + * + * Called without locks, just before checking for pending signals. 
+ */ +static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step) +{ + if (step) + user_single_step_report(regs); + else + ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT); +} #endif diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 998bc3863559..819e82ac09bd 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -52,57 +52,6 @@ struct linux_binprm; -/** - * tracehook_report_syscall_entry - task is about to attempt a system call - * @regs: user register state of current task - * - * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or - * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just - * entered the kernel for a system call. Full user register state is - * available here. Changing the values in @regs can affect the system - * call number and arguments to be tried. It is safe to block here, - * preventing the system call from beginning. - * - * Returns zero normally, or nonzero if the calling arch code should abort - * the system call. That must prevent normal entry so no system call is - * made. If @task ever returns to user mode after this, its register state - * is unspecified, but should be something harmless like an %ENOSYS error - * return. It should preserve enough information so that syscall_rollback() - * can work (see asm-generic/syscall.h). - * - * Called without locks, just after entering kernel mode. - */ -static inline __must_check int tracehook_report_syscall_entry( - struct pt_regs *regs) -{ - return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY); -} - -/** - * tracehook_report_syscall_exit - task has just finished a system call - * @regs: user register state of current task - * @step: nonzero if simulating single-step or block-step - * - * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when - * the current task has just finished an attempted system call. Full - * user register state is available here. It is safe to block here, - * preventing signals from being processed. - * - * If @step is nonzero, this report is also in lieu of the normal - * trap that would follow the system call instruction because - * user_enable_block_step() or user_enable_single_step() was used. - * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. - * - * Called without locks, just before checking for pending signals. - */ -static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) -{ - if (step) - user_single_step_report(regs); - else - ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT); -} - /** * tracehook_signal_handler - signal handler setup is complete * @stepping: nonzero if debugger single-step or block-step in use diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index 3747bf816f9a..b7af92e07d1f 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -114,7 +114,7 @@ struct ptrace_rseq_configuration { /* * These values are stored in task->ptrace_message - * by tracehook_report_syscall_* to describe the current syscall-stop. + * by ptrace_report_syscall_* to describe the current syscall-stop. 
 */
#define PTRACE_EVENTMSG_SYSCALL_ENTRY	1
#define PTRACE_EVENTMSG_SYSCALL_EXIT	2
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index bad713684c2e..f52e57c4d6d8 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -2,6 +2,7 @@

 #include
 #include
+#include
 #include
 #include
 #include
-- cgit v1.2.3-71-gd317

From bcbb7bf6ccde7cb969a5642879832bc84ebf06a3 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 10 Mar 2022 12:59:35 -0700
Subject: io_uring: allow submissions to continue on error

By default, io_uring will stop submitting a batch of requests if we run
into an error submitting a request. This isn't strictly necessary, as
the error result is passed out-of-band via a CQE anyway. And it can be
a bit confusing for some applications.

Provide a way to set up a ring that will continue submitting on error,
when the error CQE has been posted.

There's still one case that will break out of submission. If we fail
allocating a request, then we'll still return -ENOMEM. We could in
theory post a CQE for that condition too even if we never got a
request. Leave that for a potential followup.

Reported-by: Dylan Yudaken
Signed-off-by: Jens Axboe
---
 fs/io_uring.c                 | 12 +++++++++---
 include/uapi/linux/io_uring.h |  1 +
 2 files changed, 10 insertions(+), 3 deletions(-)

(limited to 'include/uapi/linux')

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3145c9cacee0..229b31d644ef 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7801,8 +7801,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		}
 		/* will complete beyond this point, count as submitted */
 		submitted++;
-		if (io_submit_sqe(ctx, req, sqe))
-			break;
+		if (io_submit_sqe(ctx, req, sqe)) {
+			/*
+			 * Continue submitting even for sqe failure if the
+			 * ring was setup with IORING_SETUP_SUBMIT_ALL
+			 */
+			if (!(ctx->flags & IORING_SETUP_SUBMIT_ALL))
+				break;
+		}
 	} while (submitted < nr);

 	if (unlikely(submitted != nr)) {
@@ -11265,7 +11271,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
 			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
-			IORING_SETUP_R_DISABLED))
+			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL))
 		return -EINVAL;

 	return io_uring_create(entries, &p, params);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 8bd4bfdd9a89..d2be4eb22008 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -101,6 +101,7 @@ enum {
 #define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
 #define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
 #define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
+#define IORING_SETUP_SUBMIT_ALL	(1U << 7)	/* continue submit on error */

 enum {
 	IORING_OP_NOP,
-- cgit v1.2.3-71-gd317

From 9bb984f28d5bcb917d35d930fcfb89f90f9449fd Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Wed, 9 Mar 2022 01:05:09 -0800
Subject: bpf: Remove BPF_SKB_DELIVERY_TIME_NONE and rename s/delivery_time_/tstamp_/

This patch simplifies the uapi bpf.h regarding the tstamp type and
uses a way similar to the kernel's to describe the value stored in
__sk_buff->tstamp.

My earlier thought was to avoid describing the semantic and clock base
for the rcv timestamp until there was more clarity on the use case,
hence the __sk_buff->delivery_time_type naming instead of
__sk_buff->tstamp_type. On further thought, it can reuse the UNSPEC
naming.
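To make the end result concrete, below is a minimal tc egress sketch
written against the renamed uapi that the rest of this message
describes. It is illustrative only: the program logic is an assumption,
and it presumes libbpf helper definitions regenerated from this patch.

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("tc")
  int set_mono_tstamp(struct __sk_buff *skb)
  {
          /* Reading tstamp_type marks the prog with tstamp_type_access,
           * so skb->tstamp is then exposed to it as-is. */
          if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC)
                  /* A non-zero tstamp requires DELIVERY_MONO, and
                   * bpf_ktime_get_ns() is in the mono clock base. */
                  bpf_skb_set_tstamp(skb, bpf_ktime_get_ns(),
                                     BPF_SKB_TSTAMP_DELIVERY_MONO);
          return 0; /* TC_ACT_OK */
  }

  char LICENSE[] SEC("license") = "GPL";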
This patch first removes BPF_SKB_DELIVERY_TIME_NONE and also renames
BPF_SKB_DELIVERY_TIME_UNSPEC to BPF_SKB_TSTAMP_UNSPEC and
BPF_SKB_DELIVERY_TIME_MONO to BPF_SKB_TSTAMP_DELIVERY_MONO.

The semantics of BPF_SKB_TSTAMP_DELIVERY_MONO are the same:
__sk_buff->tstamp has the delivery time in the mono clock base.

BPF_SKB_TSTAMP_UNSPEC means __sk_buff->tstamp has the (rcv) tstamp at
ingress and the delivery time at egress. At egress, the clock base
could be found from skb->sk->sk_clockid. __sk_buff->tstamp == 0
naturally means NONE, so NONE is not needed.

With BPF_SKB_TSTAMP_UNSPEC for the rcv tstamp at ingress,
__sk_buff->delivery_time_type is also renamed to
__sk_buff->tstamp_type, which was also suggested in the earlier
discussion:
https://lore.kernel.org/bpf/b181acbe-caf8-502d-4b7b-7d96b9fc5d55@iogearbox.net/

The above then makes __sk_buff->tstamp and __sk_buff->tstamp_type the
same as their kernel counterparts, skb->tstamp and
skb->mono_delivery_time.

The internal kernel function bpf_skb_convert_dtime_type_read() is
renamed to bpf_skb_convert_tstamp_type_read(), and it can be simplified
with BPF_SKB_DELIVERY_TIME_NONE gone. A BPF_ALU32_IMM(BPF_AND) insn is
also saved by using BPF_JMP32_IMM(BPF_JSET).

The bpf helper bpf_skb_set_delivery_time() is also renamed to
bpf_skb_set_tstamp(), and the arg name is changed from dtime to tstamp.
It only allows setting tstamp 0 for BPF_SKB_TSTAMP_UNSPEC; this could
be relaxed later if there is a use case to change a mono delivery time
to non-mono.

prog->delivery_time_access is also renamed to prog->tstamp_type_access.

Signed-off-by: Martin KaFai Lau
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/20220309090509.3712315-1-kafai@fb.com
---
 include/linux/filter.h         |  2 +-
 include/uapi/linux/bpf.h       | 40 ++++++++++---------
 net/core/filter.c              | 88 ++++++++++++++++--------------------------
 tools/include/uapi/linux/bpf.h | 40 ++++++++++---------
 4 files changed, 77 insertions(+), 93 deletions(-)

(limited to 'include/uapi/linux')

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9bf26307247f..05ed9bd31b45 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -573,7 +573,7 @@ struct bpf_prog {
 				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
 				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
 				call_get_func_ip:1, /* Do we call get_func_ip() */
-				delivery_time_access:1; /* Accessed __sk_buff->delivery_time_type */
+				tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
 	enum bpf_prog_type	type; /* Type of BPF program */
 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
 	u32			len; /* Number of filter blocks */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index bc23020b638d..d288a0a9f797 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5090,23 +5090,22 @@ union bpf_attr {
 *		0 on success, or a negative error in case of failure. On error
 *		*dst* buffer is zeroed out.
 *
- * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type)
+ * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
 *	Description
- *		Set a *dtime* (delivery time) to the __sk_buff->tstamp and also
- *		change the __sk_buff->delivery_time_type to *dtime_type*.
+ *		Change the __sk_buff->tstamp_type to *tstamp_type*
+ *		and set *tstamp* to the __sk_buff->tstamp together.
* - * When setting a delivery time (non zero *dtime*) to - * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* - * is supported. It is the only delivery_time_type that will be - * kept after bpf_redirect_*(). - * - * If there is no need to change the __sk_buff->delivery_time_type, - * the delivery time can be directly written to __sk_buff->tstamp + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp * instead. * - * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE - * can be used to clear any delivery time stored in - * __sk_buff->tstamp. + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * @@ -5119,7 +5118,7 @@ union bpf_attr { * Return * 0 on success. * **-EINVAL** for invalid input - * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol + * **-EOPNOTSUPP** for unsupported protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,7 +5313,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ - FN(skb_set_delivery_time), \ + FN(skb_set_tstamp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5505,9 +5504,12 @@ union { \ } __attribute__((aligned(8))) enum { - BPF_SKB_DELIVERY_TIME_NONE, - BPF_SKB_DELIVERY_TIME_UNSPEC, - BPF_SKB_DELIVERY_TIME_MONO, + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ }; /* user accessible mirror of in-kernel sk_buff. @@ -5550,7 +5552,7 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u8 delivery_time_type; + __u8 tstamp_type; __u32 :24; /* Padding, future use. 
*/ __u64 hwtstamp; }; diff --git a/net/core/filter.c b/net/core/filter.c index f914e4b13b18..03655f2074ae 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7388,36 +7388,36 @@ static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_skb_set_delivery_time, struct sk_buff *, skb, - u64, dtime, u32, dtime_type) +BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, + u64, tstamp, u32, tstamp_type) { /* skb_clear_delivery_time() is done for inet protocol */ if (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)) return -EOPNOTSUPP; - switch (dtime_type) { - case BPF_SKB_DELIVERY_TIME_MONO: - if (!dtime) + switch (tstamp_type) { + case BPF_SKB_TSTAMP_DELIVERY_MONO: + if (!tstamp) return -EINVAL; - skb->tstamp = dtime; + skb->tstamp = tstamp; skb->mono_delivery_time = 1; break; - case BPF_SKB_DELIVERY_TIME_NONE: - if (dtime) + case BPF_SKB_TSTAMP_UNSPEC: + if (tstamp) return -EINVAL; skb->tstamp = 0; skb->mono_delivery_time = 0; break; default: - return -EOPNOTSUPP; + return -EINVAL; } return 0; } -static const struct bpf_func_proto bpf_skb_set_delivery_time_proto = { - .func = bpf_skb_set_delivery_time, +static const struct bpf_func_proto bpf_skb_set_tstamp_proto = { + .func = bpf_skb_set_tstamp, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, @@ -7786,8 +7786,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_tcp_gen_syncookie_proto; case BPF_FUNC_sk_assign: return &bpf_sk_assign_proto; - case BPF_FUNC_skb_set_delivery_time: - return &bpf_skb_set_delivery_time_proto; + case BPF_FUNC_skb_set_tstamp: + return &bpf_skb_set_tstamp_proto; #endif default: return bpf_sk_base_func_proto(func_id); @@ -8127,9 +8127,9 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; - case offsetof(struct __sk_buff, delivery_time_type): + case offsetof(struct __sk_buff, tstamp_type): return false; - case offsetofend(struct __sk_buff, delivery_time_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: + case offsetofend(struct __sk_buff, tstamp_type) ... offsetof(struct __sk_buff, hwtstamp) - 1: /* Explicitly prohibit access to padding in __sk_buff. */ return false; default: @@ -8484,14 +8484,14 @@ static bool tc_cls_act_is_valid_access(int off, int size, break; case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; - case offsetof(struct __sk_buff, delivery_time_type): + case offsetof(struct __sk_buff, tstamp_type): /* The convert_ctx_access() on reading and writing * __sk_buff->tstamp depends on whether the bpf prog - * has used __sk_buff->delivery_time_type or not. - * Thus, we need to set prog->delivery_time_access + * has used __sk_buff->tstamp_type or not. + * Thus, we need to set prog->tstamp_type_access * earlier during is_valid_access() here. 
*/ - ((struct bpf_prog *)prog)->delivery_time_access = 1; + ((struct bpf_prog *)prog)->tstamp_type_access = 1; return size == sizeof(__u8); } @@ -8888,42 +8888,22 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, return insn - insn_buf; } -static struct bpf_insn *bpf_convert_dtime_type_read(const struct bpf_insn *si, - struct bpf_insn *insn) +static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, + struct bpf_insn *insn) { __u8 value_reg = si->dst_reg; __u8 skb_reg = si->src_reg; + /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* value_reg = BPF_SKB_DELIVERY_TIME_MONO */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_MONO); - *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 10 : 5); - - *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, skb_reg, - offsetof(struct sk_buff, tstamp)); - *insn++ = BPF_JMP_IMM(BPF_JNE, tmp_reg, 0, 2); - /* value_reg = BPF_SKB_DELIVERY_TIME_NONE */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_NONE); - *insn++ = BPF_JMP_A(IS_ENABLED(CONFIG_NET_CLS_ACT) ? 6 : 1); - -#ifdef CONFIG_NET_CLS_ACT - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JEQ, tmp_reg, 0, 2); - /* At ingress, value_reg = 0 */ - *insn++ = BPF_MOV32_IMM(value_reg, 0); + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, + SKB_MONO_DELIVERY_TIME_MASK, 2); + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); *insn++ = BPF_JMP_A(1); -#endif - - /* value_reg = BPF_SKB_DELIVERYT_TIME_UNSPEC */ - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_DELIVERY_TIME_UNSPEC); + *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO); - /* 15 insns with CONFIG_NET_CLS_ACT */ return insn; } @@ -8956,11 +8936,11 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, __u8 skb_reg = si->src_reg; #ifdef CONFIG_NET_CLS_ACT - /* If the delivery_time_type is read, + /* If the tstamp_type is read, * the bpf prog is aware the tstamp could have delivery time. - * Thus, read skb->tstamp as is if delivery_time_access is true. + * Thus, read skb->tstamp as is if tstamp_type_access is true. */ - if (!prog->delivery_time_access) { + if (!prog->tstamp_type_access) { /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; @@ -8990,13 +8970,13 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, __u8 skb_reg = si->dst_reg; #ifdef CONFIG_NET_CLS_ACT - /* If the delivery_time_type is read, + /* If the tstamp_type is read, * the bpf prog is aware the tstamp could have delivery time. - * Thus, write skb->tstamp as is if delivery_time_access is true. + * Thus, write skb->tstamp as is if tstamp_type_access is true. * Otherwise, writing at ingress will have to clear the * mono_delivery_time bit also. 
*/ - if (!prog->delivery_time_access) { + if (!prog->tstamp_type_access) { __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); @@ -9329,8 +9309,8 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, insn = bpf_convert_tstamp_read(prog, si, insn); break; - case offsetof(struct __sk_buff, delivery_time_type): - insn = bpf_convert_dtime_type_read(si, insn); + case offsetof(struct __sk_buff, tstamp_type): + insn = bpf_convert_tstamp_type_read(si, insn); break; case offsetof(struct __sk_buff, gso_segs): diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bc23020b638d..d288a0a9f797 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5090,23 +5090,22 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. On error * *dst* buffer is zeroed out. * - * long bpf_skb_set_delivery_time(struct sk_buff *skb, u64 dtime, u32 dtime_type) + * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) * Description - * Set a *dtime* (delivery time) to the __sk_buff->tstamp and also - * change the __sk_buff->delivery_time_type to *dtime_type*. + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. * - * When setting a delivery time (non zero *dtime*) to - * __sk_buff->tstamp, only BPF_SKB_DELIVERY_TIME_MONO *dtime_type* - * is supported. It is the only delivery_time_type that will be - * kept after bpf_redirect_*(). - * - * If there is no need to change the __sk_buff->delivery_time_type, - * the delivery time can be directly written to __sk_buff->tstamp + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp * instead. * - * *dtime* 0 and *dtime_type* BPF_SKB_DELIVERY_TIME_NONE - * can be used to clear any delivery time stored in - * __sk_buff->tstamp. + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. * * Only IPv4 and IPv6 skb->protocol are supported. * @@ -5119,7 +5118,7 @@ union bpf_attr { * Return * 0 on success. * **-EINVAL** for invalid input - * **-EOPNOTSUPP** for unsupported delivery_time_type and protocol + * **-EOPNOTSUPP** for unsupported protocol */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,7 +5313,7 @@ union bpf_attr { FN(xdp_load_bytes), \ FN(xdp_store_bytes), \ FN(copy_from_user_task), \ - FN(skb_set_delivery_time), \ + FN(skb_set_tstamp), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5505,9 +5504,12 @@ union { \ } __attribute__((aligned(8))) enum { - BPF_SKB_DELIVERY_TIME_NONE, - BPF_SKB_DELIVERY_TIME_UNSPEC, - BPF_SKB_DELIVERY_TIME_MONO, + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ }; /* user accessible mirror of in-kernel sk_buff. @@ -5550,7 +5552,7 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u8 delivery_time_type; + __u8 tstamp_type; __u32 :24; /* Padding, future use. 
*/ __u64 hwtstamp; }; -- cgit v1.2.3-71-gd317 From 58617014405ad5c9f94f464444f4972dabb71ca7 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Thu, 10 Mar 2022 23:53:35 +0800 Subject: bpf: Fix comment for helper bpf_current_task_under_cgroup() Fix the descriptions of the return values of helper bpf_current_task_under_cgroup(). Fixes: c6b5fb8690fa ("bpf: add documentation for eBPF helpers (42-50)") Signed-off-by: Hengqi Chen Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220310155335.1278783-1-hengqi.chen@gmail.com --- include/uapi/linux/bpf.h | 4 ++-- tools/include/uapi/linux/bpf.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d288a0a9f797..e9978a916c3e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2302,8 +2302,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d288a0a9f797..e9978a916c3e 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2302,8 +2302,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) -- cgit v1.2.3-71-gd317 From 174b16946e39ebd369097e0f773536c91a8c1a4c Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 2 Mar 2022 12:13:58 +0100 Subject: bpf-lsm: Introduce new helper bpf_ima_file_hash() ima_file_hash() has been modified to calculate the measurement of a file on demand, if it has not been already performed by IMA or the measurement is not fresh. For compatibility reasons, ima_inode_hash() remains unchanged. Keep the same approach in eBPF and introduce the new helper bpf_ima_file_hash() to take advantage of the modified behavior of ima_file_hash(). Signed-off-by: Roberto Sassu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220302111404.193900-4-roberto.sassu@huawei.com --- include/uapi/linux/bpf.h | 11 +++++++++++ kernel/bpf/bpf_lsm.c | 20 ++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 11 +++++++++++ 3 files changed, 42 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e9978a916c3e..99fab54ae9c0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5119,6 +5119,16 @@ union bpf_attr { * 0 on success. * **-EINVAL** for invalid input * **-EOPNOTSUPP** for unsupported protocol + * + * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) + * Description + * Returns a calculated IMA hash of the *file*. 
+ If the hash is larger than *size*, then only *size* + bytes will be copied to *dst*. + Return + The **hash_algo** is returned on success, + **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if + invalid arguments are passed. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,6 +5324,7 @@ union bpf_attr { FN(xdp_store_bytes), \ FN(copy_from_user_task), \ FN(skb_set_tstamp), \ + FN(ima_file_hash), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 9e4ecc990647..e8d27af5bbcc 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -99,6 +99,24 @@ static const struct bpf_func_proto bpf_ima_inode_hash_proto = { .allowed = bpf_ima_inode_hash_allowed, }; +BPF_CALL_3(bpf_ima_file_hash, struct file *, file, void *, dst, u32, size) +{ + return ima_file_hash(file, dst, size); +} + +BTF_ID_LIST_SINGLE(bpf_ima_file_hash_btf_ids, struct, file) + +static const struct bpf_func_proto bpf_ima_file_hash_proto = { + .func = bpf_ima_file_hash, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg1_btf_id = &bpf_ima_file_hash_btf_ids[0], + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE, + .allowed = bpf_ima_inode_hash_allowed, +}; + static const struct bpf_func_proto * bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -121,6 +139,8 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_bprm_opts_set_proto; case BPF_FUNC_ima_inode_hash: return prog->aux->sleepable ? &bpf_ima_inode_hash_proto : NULL; + case BPF_FUNC_ima_file_hash: + return prog->aux->sleepable ? &bpf_ima_file_hash_proto : NULL; default: return tracing_prog_func_proto(func_id, prog); } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e9978a916c3e..99fab54ae9c0 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5119,6 +5119,16 @@ union bpf_attr { * 0 on success. * **-EINVAL** for invalid input * **-EOPNOTSUPP** for unsupported protocol + * + * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) + * Description + * Returns a calculated IMA hash of the *file*. + * If the hash is larger than *size*, then only *size* + * bytes will be copied to *dst*. + * Return + * The **hash_algo** is returned on success, + * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if + * invalid arguments are passed. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5314,6 +5324,7 @@ union bpf_attr { FN(xdp_store_bytes), \ FN(copy_from_user_task), \ FN(skb_set_tstamp), \ + FN(ima_file_hash), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper -- cgit v1.2.3-71-gd317 From 1926407a4ab0e59d5a27bed7b82029b356d80fa0 Mon Sep 17 00:00:00 2001 From: Ilya Maximets Date: Wed, 9 Mar 2022 23:20:33 +0100 Subject: net: openvswitch: fix uAPI incompatibility with existing user space A few years ago, OVS user space made a strange choice in commit [1]: it defined types that are valid only for user space inside its copy of a kernel uAPI header ('#ifndef __KERNEL__' and another guarding attribute were added later). This leads to an inevitable clash between user space and kernel types when the kernel uAPI is extended. The issue was unveiled by the addition of a new type for the IPv6 extension header in the kernel uAPI.
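The clash is easiest to see side by side. A condensed sketch of the two views of enum ovs_key_attr before this fix (simplified for illustration, not the literal header contents):

    enum ovs_key_attr {
            /* ... types 0..28, shared by kernel and user space ... */
    #ifdef __KERNEL__
            OVS_KEY_ATTR_IPV6_EXTHDRS,   /* 29 in the kernel's view */
            OVS_KEY_ATTR_TUNNEL_INFO,    /* 30 in the kernel's view */
    #else
            OVS_KEY_ATTR_PACKET_TYPE,    /* 29 in the old OVS user space copy */
            OVS_KEY_ATTR_ND_EXTENSIONS,  /* 30 in the old OVS user space copy */
    #endif
            __OVS_KEY_ATTR_MAX
    };

The same numeric attribute type therefore means different things to the two sides.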
When the kernel provides the OVS_KEY_ATTR_IPV6_EXTHDRS attribute to an older user space application, the application tries to parse it as OVS_KEY_ATTR_PACKET_TYPE and discards the whole netlink message as malformed. Since OVS_KEY_ATTR_IPV6_EXTHDRS is supplied along with every IPv6 packet that goes to user space, IPv6 support is fully broken. Fix that by bringing these user space attributes to the kernel uAPI to avoid the clash. Strictly speaking, this is not a problem of the kernel uAPI, but changing it is the only way to avoid breakage of the older user space applications at this point. These two types are now explicitly rejected, since they should not be passed to the kernel. Additionally, OVS_KEY_ATTR_TUNNEL_INFO is moved out of the '#ifdef __KERNEL__' block, as there is no good reason to hide it from user space; it is also explicitly rejected now, because it is for in-kernel use only. Comments with warnings were added to avoid the problem coming back. (1 << type) is converted to (1ULL << type) to avoid integer overflow on OVS_KEY_ATTR_IPV6_EXTHDRS, since it equals 32 now. [1] beb75a40fdc2 ("userspace: Switching of L3 packets in L2 pipeline") Fixes: 28a3f0601727 ("net: openvswitch: IPv6: Add IPv6 extension header support") Link: https://lore.kernel.org/netdev/3adf00c7-fe65-3ef4-b6d7-6d8a0cad8a5f@nvidia.com Link: https://github.com/openvswitch/ovs/commit/beb75a40fdc295bfd6521b0068b4cd12f6de507c Reported-by: Roi Dayan Signed-off-by: Ilya Maximets Acked-by: Nicolas Dichtel Acked-by: Aaron Conole Link: https://lore.kernel.org/r/20220309222033.3018976-1-i.maximets@ovn.org Signed-off-by: Jakub Kicinski --- include/uapi/linux/openvswitch.h | 18 ++++++++++++++---- net/openvswitch/flow_netlink.c | 13 ++++++++++--- 2 files changed, 24 insertions(+), 7 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 9d1710f20505..ce3e1738d427 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -351,11 +351,21 @@ enum ovs_key_attr { OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, /* struct ovs_key_ct_tuple_ipv4 */ OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */ OVS_KEY_ATTR_NSH, /* Nested set of ovs_nsh_key_* */ - OVS_KEY_ATTR_IPV6_EXTHDRS, /* struct ovs_key_ipv6_exthdr */ -#ifdef __KERNEL__ - OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ -#endif + /* User space decided to squat on types 29 and 30. They are defined + * below, but should not be sent to the kernel. + * + * WARNING: No new types should be added unless they are defined + * for both kernel and user space (no 'ifdef's). It's hard + * to keep compatibility otherwise. + */ + OVS_KEY_ATTR_PACKET_TYPE, /* be32 packet type */ + OVS_KEY_ATTR_ND_EXTENSIONS, /* IPv6 Neighbor Discovery extensions */ + + OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info. * For in-kernel use only. */ + OVS_KEY_ATTR_IPV6_EXTHDRS, /* struct ovs_key_ipv6_exthdr */ __OVS_KEY_ATTR_MAX }; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 8b4124820f7d..5176f6ccac8e 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -346,7 +346,7 @@ size_t ovs_key_attr_size(void) /* Whenever adding new OVS_KEY_ FIELDS, we should consider * updating this function.
 */ - BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 30); + BUILD_BUG_ON(OVS_KEY_ATTR_MAX != 32); return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ @@ -482,7 +482,14 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, return -EINVAL; } - if (attrs & (1 << type)) { + if (type == OVS_KEY_ATTR_PACKET_TYPE || + type == OVS_KEY_ATTR_ND_EXTENSIONS || + type == OVS_KEY_ATTR_TUNNEL_INFO) { + OVS_NLERR(log, "Key type %d is not supported", type); + return -EINVAL; + } + + if (attrs & (1ULL << type)) { OVS_NLERR(log, "Duplicate key (type %d).", type); return -EINVAL; } @@ -495,7 +502,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, } if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { - attrs |= 1 << type; + attrs |= 1ULL << type; a[type] = nla; } } -- cgit v1.2.3-71-gd317 From 2916b7a9c7c25ecf9be2f37e567a277e861f8e3f Mon Sep 17 00:00:00 2001 From: Veerendranath Jakkam Date: Tue, 22 Feb 2022 20:36:39 +0530 Subject: nl80211: fix typo of NL80211_IF_TYPE_OCB in documentation It should be NL80211_IFTYPE_OCB instead. Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/1645542399-4680-1-git-send-email-quic_vjakkam@quicinc.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 98ed52663d6b..0568a79097b8 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3259,7 +3259,7 @@ enum nl80211_attrs { * and therefore can't be created in the normal ways, use the * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE * commands to create and destroy one - * @NL80211_IF_TYPE_OCB: Outside Context of a BSS + * @NL80211_IFTYPE_OCB: Outside Context of a BSS * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev) * @NL80211_IFTYPE_MAX: highest interface type number currently defined -- cgit v1.2.3-71-gd317 From b20dc3c684580ddc07eb48ee3c3dc7597cd5eebf Mon Sep 17 00:00:00 2001 From: Wojciech Drewek Date: Fri, 4 Mar 2022 17:40:42 +0100 Subject: gtp: Allow to create GTP device without FDs Currently, when users want to create a GTP device, they have to provide file handles to sockets created in userspace (IFLA_GTP_FD0, IFLA_GTP_FD1). This behaviour is not ideal when considering the option of adding support for GTP device creation through ip link: the ip link utility is not a good place to create such sockets. This patch allows creating a GTP device without providing the IFLA_GTP_FD0 and IFLA_GTP_FD1 arguments. If the user sets the IFLA_GTP_CREATE_SOCKETS attribute, the GTP module takes care of creating the UDP sockets by itself. The sockets are created on the well-known UDP ports used for the GTP protocol (GTP0_PORT and GTP1U_PORT). In this case we don't have to provide encap_destroy because no extra deinitialization is needed; everything is covered by udp_tunnel_sock_release. Note: a GTP instance created with only this change applied does not handle GTP echo requests; that is implemented in the following patch.
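With the new attribute in place, creating a self-contained device could look roughly like this (a hypothetical ip link invocation shown for illustration; the 'create-sockets' keyword assumes iproute2 grows an option that sets IFLA_GTP_CREATE_SOCKETS):

    # ip link add gtp1 type gtp role sgsn create-sockets

The module then opens the well-known GTP UDP ports itself (GTP0_PORT 3386 and GTP1U_PORT 2152), with no userspace file descriptors involved.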
Signed-off-by: Wojciech Drewek Signed-off-by: Tony Nguyen --- drivers/net/gtp.c | 101 +++++++++++++++++++++++++++++++++++-------- include/uapi/linux/if_link.h | 1 + 2 files changed, 85 insertions(+), 17 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index bf087171bcf0..25d8521897b3 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -66,8 +66,10 @@ struct gtp_dev { struct sock *sk0; struct sock *sk1u; + u8 sk_created; struct net_device *dev; + struct net *net; unsigned int role; unsigned int hash_size; @@ -320,8 +322,16 @@ static void gtp_encap_disable_sock(struct sock *sk) static void gtp_encap_disable(struct gtp_dev *gtp) { - gtp_encap_disable_sock(gtp->sk0); - gtp_encap_disable_sock(gtp->sk1u); + if (gtp->sk_created) { + udp_tunnel_sock_release(gtp->sk0->sk_socket); + udp_tunnel_sock_release(gtp->sk1u->sk_socket); + gtp->sk_created = false; + gtp->sk0 = NULL; + gtp->sk1u = NULL; + } else { + gtp_encap_disable_sock(gtp->sk0); + gtp_encap_disable_sock(gtp->sk1u); + } } /* UDP encapsulation receive handler. See net/ipv4/udp.c. @@ -656,17 +666,69 @@ static void gtp_destructor(struct net_device *dev) kfree(gtp->tid_hash); } +static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp) +{ + struct udp_tunnel_sock_cfg tuncfg = {}; + struct udp_port_cfg udp_conf = { + .local_ip.s_addr = htonl(INADDR_ANY), + .family = AF_INET, + }; + struct net *net = gtp->net; + struct socket *sock; + int err; + + if (type == UDP_ENCAP_GTP0) + udp_conf.local_udp_port = htons(GTP0_PORT); + else if (type == UDP_ENCAP_GTP1U) + udp_conf.local_udp_port = htons(GTP1U_PORT); + else + return ERR_PTR(-EINVAL); + + err = udp_sock_create(net, &udp_conf, &sock); + if (err) + return ERR_PTR(err); + + tuncfg.sk_user_data = gtp; + tuncfg.encap_type = type; + tuncfg.encap_rcv = gtp_encap_recv; + tuncfg.encap_destroy = NULL; + + setup_udp_tunnel_sock(net, sock, &tuncfg); + + return sock->sk; +} + +static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[]) +{ + struct sock *sk1u = NULL; + struct sock *sk0 = NULL; + + sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp); + if (IS_ERR(sk0)) + return PTR_ERR(sk0); + + sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp); + if (IS_ERR(sk1u)) { + udp_tunnel_sock_release(sk0->sk_socket); + return PTR_ERR(sk1u); + } + + gtp->sk_created = true; + gtp->sk0 = sk0; + gtp->sk1u = sk1u; + + return 0; +} + static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + unsigned int role = GTP_ROLE_GGSN; struct gtp_dev *gtp; struct gtp_net *gn; int hashsize, err; - if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) - return -EINVAL; - gtp = netdev_priv(dev); if (!data[IFLA_GTP_PDP_HASHSIZE]) { @@ -677,11 +739,23 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, hashsize = 1024; } + if (data[IFLA_GTP_ROLE]) { + role = nla_get_u32(data[IFLA_GTP_ROLE]); + if (role > GTP_ROLE_SGSN) + return -EINVAL; + } + gtp->role = role; + + gtp->net = src_net; + err = gtp_hashtable_new(gtp, hashsize); if (err < 0) return err; - err = gtp_encap_enable(gtp, data); + if (data[IFLA_GTP_CREATE_SOCKETS]) + err = gtp_create_sockets(gtp, data); + else + err = gtp_encap_enable(gtp, data); if (err < 0) goto out_hashtable; @@ -726,6 +800,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_FD1] = { .type = NLA_U32 }, [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, [IFLA_GTP_ROLE] = { .type = NLA_U32 }, + [IFLA_GTP_CREATE_SOCKETS] = { 
.type = NLA_U8 }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], @@ -848,7 +923,9 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) { struct sock *sk1u = NULL; struct sock *sk0 = NULL; - unsigned int role = GTP_ROLE_GGSN; + + if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) + return -EINVAL; if (data[IFLA_GTP_FD0]) { u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); @@ -868,18 +945,8 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) } } - if (data[IFLA_GTP_ROLE]) { - role = nla_get_u32(data[IFLA_GTP_ROLE]); - if (role > GTP_ROLE_SGSN) { - gtp_encap_disable_sock(sk0); - gtp_encap_disable_sock(sk1u); - return -EINVAL; - } - } - gtp->sk0 = sk0; gtp->sk1u = sk1u; - gtp->role = role; return 0; } diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ddca20357e7e..ebd2aa3ef809 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -887,6 +887,7 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) -- cgit v1.2.3-71-gd317 From 9af41cc33471ea1efa6f77e188f055cc77d0a5c5 Mon Sep 17 00:00:00 2001 From: Wojciech Drewek Date: Fri, 4 Mar 2022 17:40:43 +0100 Subject: gtp: Implement GTP echo response Adding a GTP device through ip link creates a situation where there is no userspace daemon to handle GTP messages (Echo Request, for example). A GTP-U instance that does not respond to echo requests violates the GTP specification. When a GTP packet arrives with the GTP_ECHO_REQ message type, a GTP_ECHO_RSP is sent back to the sender. The GTP_ECHO_RSP message should contain an information element with the GTPIE_RECOVERY tag and the restart counter value. For GTPv1 the restart counter is not used and should be equal to 0; for GTPv0 the restart counter contains the value provided from userspace (IFLA_GTP_RESTART_COUNT). Signed-off-by: Wojciech Drewek Suggested-by: Harald Welte Reviewed-by: Harald Welte Tested-by: Harald Welte Signed-off-by: Tony Nguyen --- drivers/net/gtp.c | 213 +++++++++++++++++++++++++++++++++++++++---- include/net/gtp.h | 31 +++++++ include/uapi/linux/if_link.h | 1 + 3 files changed, 229 insertions(+), 16 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 25d8521897b3..bf434d79f868 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -75,6 +75,8 @@ struct gtp_dev { unsigned int hash_size; struct hlist_head *tid_hash; struct hlist_head *addr_hash; + + u8 restart_count; }; static unsigned int gtp_net_id __read_mostly; @@ -217,6 +219,106 @@ err: return -1; } +static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, + const struct sock *sk, + __be32 daddr, __be32 saddr) +{ + memset(fl4, 0, sizeof(*fl4)); + fl4->flowi4_oif = sk->sk_bound_dev_if; + fl4->daddr = daddr; + fl4->saddr = saddr; + fl4->flowi4_tos = RT_CONN_FLAGS(sk); + fl4->flowi4_proto = sk->sk_protocol; + + return ip_route_output_key(sock_net(sk), fl4); +} + +/* GSM TS 09.60. 7.3 * In all Path Management messages: * - TID: is not used and shall be set to 0. * - Flow Label is not used and shall be set to 0 * In signalling messages: * - number: this field is not yet used in signalling messages. * It shall be set to 255 by the sender and shall be ignored * by the receiver * Returns true if the echo req was correct, false otherwise.
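 + * For example, under these rules a well-formed v0 echo request has + * flags == 0x1e (version 0, GTP-non-prime), tid == 0, flow == 0 and + * number == 0xff; anything else fails the check below.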
+ */ +static bool gtp0_validate_echo_req(struct gtp0_header *gtp0) +{ + return !(gtp0->tid || (gtp0->flags ^ 0x1e) || + gtp0->number != 0xff || gtp0->flow); +} + +static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) +{ + struct gtp0_packet *gtp_pkt; + struct gtp0_header *gtp0; + struct rtable *rt; + struct flowi4 fl4; + struct iphdr *iph; + __be16 seq; + + gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); + + if (!gtp0_validate_echo_req(gtp0)) + return -1; + + seq = gtp0->seq; + + /* pull GTP and UDP headers */ + skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr)); + + gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet)); + memset(gtp_pkt, 0, sizeof(struct gtp0_packet)); + + gtp_pkt->gtp0_h.flags = 0x1e; /* v0, GTP-non-prime. */ + gtp_pkt->gtp0_h.type = GTP_ECHO_RSP; + gtp_pkt->gtp0_h.length = + htons(sizeof(struct gtp0_packet) - sizeof(struct gtp0_header)); + + /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response + * message shall be copied from the signalling request message + * that the GSN is replying to. + */ + gtp_pkt->gtp0_h.seq = seq; + + /* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID + * are not used and shall be set to 0. + */ + gtp_pkt->gtp0_h.flow = 0; + gtp_pkt->gtp0_h.tid = 0; + gtp_pkt->gtp0_h.number = 0xff; + gtp_pkt->gtp0_h.spare[0] = 0xff; + gtp_pkt->gtp0_h.spare[1] = 0xff; + gtp_pkt->gtp0_h.spare[2] = 0xff; + + gtp_pkt->ie.tag = GTPIE_RECOVERY; + gtp_pkt->ie.val = gtp->restart_count; + + iph = ip_hdr(skb); + + /* find route to the sender, + * src address becomes dst address and vice versa. + */ + rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); + if (IS_ERR(rt)) { + netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", + &iph->saddr); + return -1; + } + + udp_tunnel_xmit_skb(rt, gtp->sk0, skb, + fl4.saddr, fl4.daddr, + iph->tos, + ip4_dst_hoplimit(&rt->dst), + 0, + htons(GTP0_PORT), htons(GTP0_PORT), + !net_eq(sock_net(gtp->sk1u), + dev_net(gtp->dev)), + false); + return 0; +} + /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { @@ -233,6 +335,13 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) if ((gtp0->flags >> 5) != GTP_V0) return 1; + /* If the sockets were created in kernel, it means that + * there is no daemon running in userspace which would + * handle echo request. + */ + if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created) + return gtp0_send_echo_resp(gtp, skb); + if (gtp0->type != GTP_TPDU) return 1; @@ -245,6 +354,75 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) return gtp_rx(pctx, skb, hdrlen, gtp->role); } +static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) +{ + struct gtp1_header_long *gtp1u; + struct gtp1u_packet *gtp_pkt; + struct rtable *rt; + struct flowi4 fl4; + struct iphdr *iph; + + gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); + + /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, + * Error Indication and Supported Extension Headers Notification + * messages, the S flag shall be set to 1 and TEID shall be set to 0. 
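 + * Accordingly, a message without GTP1_F_SEQ set in its flags, or with + * a non-zero TEID, is rejected right below.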
 + */ + if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) + return -1; + + /* pull GTP and UDP headers */ + skb_pull_data(skb, + sizeof(struct gtp1_header_long) + sizeof(struct udphdr)); + + gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet)); + memset(gtp_pkt, 0, sizeof(struct gtp1u_packet)); + + /* S flag must be set to 1 */ + gtp_pkt->gtp1u_h.flags = 0x32; + gtp_pkt->gtp1u_h.type = GTP_ECHO_RSP; + /* seq, npdu and next should be counted into the length of the GTP packet; + * that's why the size of gtp1_header should be subtracted, + * not the size of gtp1_header_long. + */ + gtp_pkt->gtp1u_h.length = + htons(sizeof(struct gtp1u_packet) - sizeof(struct gtp1_header)); + /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */ + gtp_pkt->gtp1u_h.tid = 0; + + /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the + * Recovery information element shall not be used, i.e. it shall + * be set to zero by the sender and shall be ignored by the receiver. + * The Recovery information element is mandatory due to backwards + * compatibility reasons. + */ + gtp_pkt->ie.tag = GTPIE_RECOVERY; + gtp_pkt->ie.val = 0; + + iph = ip_hdr(skb); + + /* find route to the sender, + * src address becomes dst address and vice versa. + */ + rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr); + if (IS_ERR(rt)) { + netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", + &iph->saddr); + return -1; + } + + udp_tunnel_xmit_skb(rt, gtp->sk1u, skb, + fl4.saddr, fl4.daddr, + iph->tos, + ip4_dst_hoplimit(&rt->dst), + 0, + htons(GTP1U_PORT), htons(GTP1U_PORT), + !net_eq(sock_net(gtp->sk1u), + dev_net(gtp->dev)), + false); + return 0; +} + static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + @@ -260,6 +438,13 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) if ((gtp1->flags >> 5) != GTP_V1) return 1; + /* If the sockets were created in kernel, it means that + * there is no daemon running in userspace which would + * handle echo request.
+ */ + if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created) + return gtp1u_send_echo_resp(gtp, skb); + if (gtp1->type != GTP_TPDU) return 1; @@ -398,20 +583,6 @@ static void gtp_dev_uninit(struct net_device *dev) free_percpu(dev->tstats); } -static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, - const struct sock *sk, - __be32 daddr) -{ - memset(fl4, 0, sizeof(*fl4)); - fl4->flowi4_oif = sk->sk_bound_dev_if; - fl4->daddr = daddr; - fl4->saddr = inet_sk(sk)->inet_saddr; - fl4->flowi4_tos = RT_CONN_FLAGS(sk); - fl4->flowi4_proto = sk->sk_protocol; - - return ip_route_output_key(sock_net(sk), fl4); -} - static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; @@ -517,7 +688,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, } netdev_dbg(dev, "found PDP context %p\n", pctx); - rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr); + rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr, + inet_sk(pctx->sk)->inet_saddr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to SSGN %pI4\n", &pctx->peer_addr_ip4.s_addr); @@ -746,6 +918,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, } gtp->role = role; + if (!data[IFLA_GTP_RESTART_COUNT]) + gtp->restart_count = 0; + else + gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]); + gtp->net = src_net; err = gtp_hashtable_new(gtp, hashsize); @@ -801,6 +978,7 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, [IFLA_GTP_ROLE] = { .type = NLA_U32 }, [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 }, + [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], @@ -815,7 +993,8 @@ static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], static size_t gtp_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */ - nla_total_size(sizeof(__u32)); /* IFLA_GTP_ROLE */ + nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */ + nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */ } static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev) @@ -826,6 +1005,8 @@ static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count)) + goto nla_put_failure; return 0; diff --git a/include/net/gtp.h b/include/net/gtp.h index 0e16ebb2a82d..0e12c37f2958 100644 --- a/include/net/gtp.h +++ b/include/net/gtp.h @@ -7,8 +7,13 @@ #define GTP0_PORT 3386 #define GTP1U_PORT 2152 +/* GTP messages types */ +#define GTP_ECHO_REQ 1 /* Echo Request */ +#define GTP_ECHO_RSP 2 /* Echo Response */ #define GTP_TPDU 255 +#define GTPIE_RECOVERY 14 + struct gtp0_header { /* According to GSM TS 09.60. */ __u8 flags; __u8 type; @@ -27,6 +32,32 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */ __be32 tid; } __attribute__ ((packed)); +struct gtp1_header_long { /* According to 3GPP TS 29.060. 
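 + * This is the variant with the optional seq/npdu/next fields present; + * echo messages always use it, since 3GPP TS 29.281 5.1 makes the S flag + * (and therefore the seq field) mandatory for them.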
 */ + __u8 flags; + __u8 type; + __be16 length; + __be32 tid; + __be16 seq; + __u8 npdu; + __u8 next; +} __packed; + +/* GTP Information Element */ +struct gtp_ie { + __u8 tag; + __u8 val; +} __packed; + +struct gtp0_packet { + struct gtp0_header gtp0_h; + struct gtp_ie ie; +} __packed; + +struct gtp1u_packet { + struct gtp1_header_long gtp1u_h; + struct gtp_ie ie; +} __packed; + #define GTP1_F_NPDU 0x01 #define GTP1_F_SEQ 0x02 #define GTP1_F_EXTHDR 0x04 diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ebd2aa3ef809..bd24c7dc10a2 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -888,6 +888,7 @@ enum { IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) -- cgit v1.2.3-71-gd317 From d33bd757d362699cfce3c68b53cd12b947d196f4 Mon Sep 17 00:00:00 2001 From: Wojciech Drewek Date: Fri, 4 Mar 2022 17:40:44 +0100 Subject: gtp: Implement GTP echo request Adding a GTP device through ip link creates a situation where the GTP instance is not able to send GTP echo requests. Echo requests are used to check whether a GTP peer is still alive. With this patch, gtp_genl_ops are extended with a new cmd (GTP_CMD_ECHOREQ) which allows sending an echo request in the given version of the GTP protocol (v0 or v1), from the given ms address to the given peer. TID is not included because in all path management messages it should be equal to 0. When a GTP echo response is detected, a multicast message is sent to everyone in the gtp_genl_family. The message contains the GTP version, ms address and peer address. Suggested-by: Harald Welte Signed-off-by: Wojciech Drewek Reviewed-by: Harald Welte Signed-off-by: Tony Nguyen --- drivers/net/gtp.c | 305 +++++++++++++++++++++++++++++++++++++++++------ include/uapi/linux/gtp.h | 1 + 2 files changed, 269 insertions(+), 37 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index bf434d79f868..756714d4ad92 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -79,6 +79,12 @@ struct gtp_dev { u8 restart_count; }; +struct echo_info { + struct in_addr ms_addr_ip4; + struct in_addr peer_addr_ip4; + u8 gtp_version; +}; + static unsigned int gtp_net_id __read_mostly; struct gtp_net { @@ -87,6 +93,16 @@ static u32 gtp_h_initval; +static struct genl_family gtp_genl_family; + +enum gtp_multicast_groups { + GTP_GENL_MCGRP, +}; + +static const struct genl_multicast_group gtp_genl_mcgrps[] = { + [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME }, +}; + static void pdp_context_delete(struct pdp_ctx *pctx); static inline u32 gtp0_hashfn(u64 tid) @@ -243,12 +259,38 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, * by the receiver * Returns true if the echo req was correct, false otherwise. */ -static bool gtp0_validate_echo_req(struct gtp0_header *gtp0) +static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0) { return !(gtp0->tid || (gtp0->flags ^ 0x1e) || gtp0->number != 0xff || gtp0->flow); } +/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ +static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type) +{ + int len_pkt, len_hdr; + + hdr->flags = 0x1e; /* v0, GTP-non-prime. */ + hdr->type = msg_type; + /* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID + * are not used and shall be set to 0.
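 + * Everything below is therefore fixed boilerplate; only the type and, + * for echo responses, the length field differ between GTP_ECHO_REQ and + * GTP_ECHO_RSP.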
 + */ + hdr->flow = 0; + hdr->tid = 0; + hdr->number = 0xff; + hdr->spare[0] = 0xff; + hdr->spare[1] = 0xff; + hdr->spare[2] = 0xff; + + len_pkt = sizeof(struct gtp0_packet); + len_hdr = sizeof(struct gtp0_header); + + if (msg_type == GTP_ECHO_RSP) + hdr->length = htons(len_pkt - len_hdr); + else + hdr->length = 0; +} + static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_packet *gtp_pkt; @@ -260,7 +302,7 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); - if (!gtp0_validate_echo_req(gtp0)) + if (!gtp0_validate_echo_hdr(gtp0)) return -1; seq = gtp0->seq; @@ -271,10 +313,7 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet)); memset(gtp_pkt, 0, sizeof(struct gtp0_packet)); - gtp_pkt->gtp0_h.flags = 0x1e; /* v0, GTP-non-prime. */ - gtp_pkt->gtp0_h.type = GTP_ECHO_RSP; - gtp_pkt->gtp0_h.length = - htons(sizeof(struct gtp0_packet) - sizeof(struct gtp0_header)); + gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP); /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response * message shall be copied from the signalling request message @@ -282,16 +321,6 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) */ gtp_pkt->gtp0_h.seq = seq; - /* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID - * are not used and shall be set to 0. - */ - gtp_pkt->gtp0_h.flow = 0; - gtp_pkt->gtp0_h.tid = 0; - gtp_pkt->gtp0_h.number = 0xff; - gtp_pkt->gtp0_h.spare[0] = 0xff; - gtp_pkt->gtp0_h.spare[1] = 0xff; - gtp_pkt->gtp0_h.spare[2] = 0xff; - gtp_pkt->ie.tag = GTPIE_RECOVERY; gtp_pkt->ie.val = gtp->restart_count; @@ -319,6 +348,61 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) return 0; } +static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, + int flags, u32 type, struct echo_info echo) +{ + void *genlh; + + genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, + type); + if (!genlh) + goto failure; + + if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) || + nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) || + nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr)) + goto failure; + + genlmsg_end(skb, genlh); + return 0; + +failure: + genlmsg_cancel(skb, genlh); + return -EMSGSIZE; +} + +static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) +{ + struct gtp0_header *gtp0; + struct echo_info echo; + struct sk_buff *msg; + struct iphdr *iph; + int ret; + + gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); + + if (!gtp0_validate_echo_hdr(gtp0)) + return -1; + + iph = ip_hdr(skb); + echo.ms_addr_ip4.s_addr = iph->daddr; + echo.peer_addr_ip4.s_addr = iph->saddr; + echo.gtp_version = GTP_V0; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return -ENOMEM; + + ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); + if (ret < 0) { + nlmsg_free(msg); + return ret; + } + + return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), + msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); +} + /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated.
 */ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { @@ -342,6 +426,9 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created) return gtp0_send_echo_resp(gtp, skb); + if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created) + return gtp0_handle_echo_resp(gtp, skb); + if (gtp0->type != GTP_TPDU) return 1; @@ -354,6 +441,36 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) return gtp_rx(pctx, skb, hdrlen, gtp->role); } +/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ +static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type) +{ + int len_pkt, len_hdr; + + /* S flag must be set to 1 */ + hdr->flags = 0x32; /* v1, GTP-non-prime. */ + hdr->type = msg_type; + /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */ + hdr->tid = 0; + + /* seq, npdu and next should be counted into the length of the GTP packet; + * that's why the size of gtp1_header should be subtracted, + * not the size of gtp1_header_long. + */ + + len_hdr = sizeof(struct gtp1_header); + + if (msg_type == GTP_ECHO_RSP) { + len_pkt = sizeof(struct gtp1u_packet); + hdr->length = htons(len_pkt - len_hdr); + } else { + /* GTP_ECHO_REQ does not carry a GTP Information Element, + * which is why gtp1_header_long is used here. + */ + len_pkt = sizeof(struct gtp1_header_long); + hdr->length = htons(len_pkt - len_hdr); + } +} + static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp1_header_long *gtp1u; @@ -378,17 +495,7 @@ static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet)); memset(gtp_pkt, 0, sizeof(struct gtp1u_packet)); - /* S flag must be set to 1 */ - gtp_pkt->gtp1u_h.flags = 0x32; - gtp_pkt->gtp1u_h.type = GTP_ECHO_RSP; - /* seq, npdu and next should be counted into the length of the GTP packet; - * that's why the size of gtp1_header should be subtracted, - * not the size of gtp1_header_long. - */ - gtp_pkt->gtp1u_h.length = - htons(sizeof(struct gtp1u_packet) - sizeof(struct gtp1_header)); - /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */ - gtp_pkt->gtp1u_h.tid = 0; + gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP); /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the * Recovery information element shall not be used, i.e. it shall @@ -423,6 +530,42 @@ static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) return 0; } +static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) +{ + struct gtp1_header_long *gtp1u; + struct echo_info echo; + struct sk_buff *msg; + struct iphdr *iph; + int ret; + + gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); + + /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, + * Error Indication and Supported Extension Headers Notification + * messages, the S flag shall be set to 1 and TEID shall be set to 0.
 + */ + if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) + return -1; + + iph = ip_hdr(skb); + echo.ms_addr_ip4.s_addr = iph->daddr; + echo.peer_addr_ip4.s_addr = iph->saddr; + echo.gtp_version = GTP_V1; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return -ENOMEM; + + ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); + if (ret < 0) { + nlmsg_free(msg); + return ret; + } + + return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), + msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); +} + static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + @@ -445,6 +588,9 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created) return gtp1u_send_echo_resp(gtp, skb); + if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created) + return gtp1u_handle_echo_resp(gtp, skb); + if (gtp1->type != GTP_TPDU) return 1; @@ -1431,16 +1577,6 @@ out_unlock: return err; } -static struct genl_family gtp_genl_family; - -enum gtp_multicast_groups { - GTP_GENL_MCGRP, -}; - -static const struct genl_multicast_group gtp_genl_mcgrps[] = { - [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME }, -}; - static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct pdp_ctx *pctx) { @@ -1584,6 +1720,95 @@ out: return skb->len; } +static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *skb_to_send; + __be32 src_ip, dst_ip; + unsigned int version; + struct gtp_dev *gtp; + struct flowi4 fl4; + struct rtable *rt; + struct sock *sk; + __be16 port; + int len; + + if (!info->attrs[GTPA_VERSION] || + !info->attrs[GTPA_LINK] || + !info->attrs[GTPA_PEER_ADDRESS] || + !info->attrs[GTPA_MS_ADDRESS]) + return -EINVAL; + + version = nla_get_u32(info->attrs[GTPA_VERSION]); + dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); + src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); + + gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); + if (!gtp) + return -ENODEV; + + if (!gtp->sk_created) + return -EOPNOTSUPP; + if (!(gtp->dev->flags & IFF_UP)) + return -ENETDOWN; + + if (version == GTP_V0) { + struct gtp0_header *gtp0_h; + + len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) + + sizeof(struct iphdr) + sizeof(struct udphdr); + + skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); + if (!skb_to_send) + return -ENOMEM; + + sk = gtp->sk0; + port = htons(GTP0_PORT); + + gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header)); + memset(gtp0_h, 0, sizeof(struct gtp0_header)); + gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ); + } else if (version == GTP_V1) { + struct gtp1_header_long *gtp1u_h; + + len = LL_RESERVED_SPACE(gtp->dev) + + sizeof(struct gtp1_header_long) + + sizeof(struct iphdr) + sizeof(struct udphdr); + + skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); + if (!skb_to_send) + return -ENOMEM; + + sk = gtp->sk1u; + port = htons(GTP1U_PORT); + + gtp1u_h = skb_push(skb_to_send, + sizeof(struct gtp1_header_long)); + memset(gtp1u_h, 0, sizeof(struct gtp1_header_long)); + gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ); + } else { + return -ENODEV; + } + + rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip); + if (IS_ERR(rt)) { + netdev_dbg(gtp->dev, "no route for echo request to %pI4\n", + &dst_ip); + kfree_skb(skb_to_send); + return -ENODEV; + } + + udp_tunnel_xmit_skb(rt, sk, skb_to_send, + fl4.saddr, fl4.daddr, + fl4.flowi4_tos, + ip4_dst_hoplimit(&rt->dst), + 0, + port, port,
!net_eq(sock_net(sk), + dev_net(gtp->dev)), + false); + return 0; +} + static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, @@ -1616,6 +1841,12 @@ static const struct genl_small_ops gtp_genl_ops[] = { .dumpit = gtp_genl_dump_pdp, .flags = GENL_ADMIN_PERM, }, + { + .cmd = GTP_CMD_ECHOREQ, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = gtp_genl_send_echo_req, + .flags = GENL_ADMIN_PERM, + }, }; static struct genl_family gtp_genl_family __ro_after_init = { diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index 79f9191bbb24..2f61298a7b77 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -8,6 +8,7 @@ enum gtp_genl_cmds { GTP_CMD_NEWPDP, GTP_CMD_DELPDP, GTP_CMD_GETPDP, + GTP_CMD_ECHOREQ, GTP_CMD_MAX, }; -- cgit v1.2.3-71-gd317 From e3acda7ade0a36c5cbebc2b54d30b7f08a4ba29b Mon Sep 17 00:00:00 2001 From: Wojciech Drewek Date: Fri, 4 Mar 2022 17:40:45 +0100 Subject: net/sched: Allow flower to match on GTP options Options are as follows: PDU_TYPE:QFI, and they reference fields from the PDU Session Protocol. PDU Session data is conveyed in a GTP-U Extension Header; the GTP-U Extension Header is described in 3GPP TS 29.281, and the PDU Session Protocol in 3GPP TS 38.415. PDU_TYPE - indicates the type of the PDU Session Information (4 bits) QFI - QoS Flow Identifier (6 bits) # ip link add gtp_dev type gtp role sgsn # tc qdisc add dev gtp_dev ingress # tc filter add dev gtp_dev protocol ip parent ffff: \ flower \ enc_key_id 11 \ gtp_opts 1:8/ff:ff \ action mirred egress redirect dev eth0 Signed-off-by: Wojciech Drewek Signed-off-by: Tony Nguyen --- include/net/gtp.h | 5 ++ include/uapi/linux/if_tunnel.h | 4 +- include/uapi/linux/pkt_cls.h | 15 ++++++ net/sched/cls_flower.c | 116 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 139 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/net/gtp.h b/include/net/gtp.h index 0e12c37f2958..23c2aaae8a42 100644 --- a/include/net/gtp.h +++ b/include/net/gtp.h @@ -58,6 +58,11 @@ struct gtp1u_packet { struct gtp_ie ie; } __packed; +struct gtp_pdu_session_info { /* According to 3GPP TS 38.415.
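 + * On the wire pdu_type is 4 bits and qfi is 6 bits; both are widened + * to a full byte here for matching.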
*/ + u8 pdu_type; + u8 qfi; +}; + #define GTP1_F_NPDU 0x01 #define GTP1_F_SEQ 0x02 #define GTP1_F_EXTHDR 0x04 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 7d9105533c7b..102119628ff5 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -176,8 +176,10 @@ enum { #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) #define TUNNEL_NOCACHE __cpu_to_be16(0x2000) #define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000) +#define TUNNEL_GTP_OPT __cpu_to_be16(0x8000) #define TUNNEL_OPTIONS_PRESENT \ - (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT) + (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \ + TUNNEL_GTP_OPT) #endif /* _UAPI_IF_TUNNEL_H_ */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index ee38b35c3f57..404f97fb239c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -616,6 +616,10 @@ enum { * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_ * attributes */ + TCA_FLOWER_KEY_ENC_OPTS_GTP, /* Nested + * TCA_FLOWER_KEY_ENC_OPT_GTP_ + * attributes + */ __TCA_FLOWER_KEY_ENC_OPTS_MAX, }; @@ -654,6 +658,17 @@ enum { #define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \ (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1) +enum { + TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, /* u8 */ + TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, /* u8 */ + + __TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \ + (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1) + enum { TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, TCA_FLOWER_KEY_MPLS_OPTS_LSE, diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 1a9b1f140f9e..c80fc49c0da1 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -723,6 +724,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, + [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED }, }; static const struct nla_policy @@ -746,6 +748,12 @@ erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, }; +static const struct nla_policy +gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = { + [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 }, +}; + static const struct nla_policy mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, @@ -1262,6 +1270,49 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, return sizeof(*md); } +static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key, + int depth, int option_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1]; + struct gtp_pdu_session_info *sinfo; + u8 len = key->enc_opts.len; + int err; + + sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len]; + memset(sinfo, 0xff, option_len); + + if (!depth) + return sizeof(*sinfo); + + if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) { + NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask"); + return -EINVAL; + } + + err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla, + gtp_opt_policy, extack); + if (err < 0) + return err; + + if (!option_len && + (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] || + 
!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) { + NL_SET_ERR_MSG_MOD(extack, + "Missing tunnel key gtp option pdu type or qfi"); + return -EINVAL; + } + + if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]) + sinfo->pdu_type = + nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]); + + if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]) + sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]); + + return sizeof(*sinfo); +} + static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) @@ -1386,6 +1437,38 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, return -EINVAL; } break; + case TCA_FLOWER_KEY_ENC_OPTS_GTP: + if (key->enc_opts.dst_opt_type) { + NL_SET_ERR_MSG_MOD(extack, + "Duplicate type for gtp options"); + return -EINVAL; + } + option_len = 0; + key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT; + option_len = fl_set_gtp_opt(nla_opt_key, key, + key_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + key->enc_opts.len += option_len; + /* At the same time we need to parse through the mask + * in order to verify exact and mask attribute lengths. + */ + mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT; + option_len = fl_set_gtp_opt(nla_opt_msk, mask, + msk_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + mask->enc_opts.len += option_len; + if (key->enc_opts.len != mask->enc_opts.len) { + NL_SET_ERR_MSG_MOD(extack, + "Key and mask miss aligned"); + return -EINVAL; + } + break; default: NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); return -EINVAL; @@ -2761,6 +2844,34 @@ nla_put_failure: return -EMSGSIZE; } +static int fl_dump_key_gtp_opt(struct sk_buff *skb, + struct flow_dissector_key_enc_opts *enc_opts) + +{ + struct gtp_pdu_session_info *session_info; + struct nlattr *nest; + + nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP); + if (!nest) + goto nla_put_failure; + + session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0]; + + if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, + session_info->pdu_type)) + goto nla_put_failure; + + if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static int fl_dump_key_ct(struct sk_buff *skb, struct flow_dissector_key_ct *key, struct flow_dissector_key_ct *mask) @@ -2824,6 +2935,11 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, if (err) goto nla_put_failure; break; + case TUNNEL_GTP_OPT: + err = fl_dump_key_gtp_opt(skb, enc_opts); + if (err) + goto nla_put_failure; + break; default: goto nla_put_failure; } -- cgit v1.2.3-71-gd317 From 3b6c6c039707f6bb7c64af2aa82a437fabb93aee Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 9 Mar 2022 19:49:48 -0800 Subject: nvdimm/region: Delete nd_blk_region infrastructure Now that the nd_namespace_blk infrastructure is removed, delete all the region machinery to coordinate provisioning aliased capacity between PMEM and BLK. 
Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/164688418803.2879318.1302315202397235855.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/acpi/nfit/core.c | 11 +- drivers/nvdimm/bus.c | 2 - drivers/nvdimm/dimm_devs.c | 204 +++---------------------------------- drivers/nvdimm/label.c | 6 +- drivers/nvdimm/label.h | 2 +- drivers/nvdimm/namespace_devs.c | 127 +++-------------------- drivers/nvdimm/nd-core.h | 24 +---- drivers/nvdimm/nd.h | 12 --- drivers/nvdimm/region.c | 31 ++---- drivers/nvdimm/region_devs.c | 158 ++++------------------------ include/linux/libnvdimm.h | 24 ----- include/uapi/linux/ndctl.h | 2 - tools/testing/nvdimm/test/ndtest.c | 67 +----------- 13 files changed, 66 insertions(+), 604 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index bea6a219fddd..fe61f617a943 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2036,10 +2036,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; } - /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */ - if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) - set_bit(NDD_NOBLK, &flags); - if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); @@ -2602,8 +2598,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, { static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS]; struct acpi_nfit_system_address *spa = nfit_spa->spa; - struct nd_blk_region_desc ndbr_desc; - struct nd_region_desc *ndr_desc; + struct nd_region_desc *ndr_desc, _ndr_desc; struct nfit_memdev *nfit_memdev; struct nvdimm_bus *nvdimm_bus; struct resource res; @@ -2619,10 +2614,10 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, memset(&res, 0, sizeof(res)); memset(&mappings, 0, sizeof(mappings)); - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); res.start = spa->address; res.end = res.start + spa->length - 1; - ndr_desc = &ndbr_desc.ndr_desc; + ndr_desc = &_ndr_desc; ndr_desc->res = &res; ndr_desc->provider_data = nfit_spa; ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 9dc7f3edd42b..a4b5f637e599 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -35,8 +35,6 @@ static int to_nd_device_type(struct device *dev) return ND_DEVICE_DIMM; else if (is_memory(dev)) return ND_DEVICE_REGION_PMEM; - else if (is_nd_blk(dev)) - return ND_DEVICE_REGION_BLK; else if (is_nd_dax(dev)) return ND_DEVICE_DAX_PMEM; else if (is_nd_region(dev->parent)) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index dc7449a40003..ee507eed42b5 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -18,10 +18,6 @@ static DEFINE_IDA(dimm_ida); -static bool noblk; -module_param(noblk, bool, 0444); -MODULE_PARM_DESC(noblk, "force disable BLK / local alias support"); - /* * Retrieve bus and dimm handle and return if this bus supports * get_config_data commands @@ -211,22 +207,6 @@ struct nvdimm *to_nvdimm(struct device *dev) } EXPORT_SYMBOL_GPL(to_nvdimm); -struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr) -{ - struct nd_region *nd_region = &ndbr->nd_region; - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - - return nd_mapping->nvdimm; -} -EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm); - -unsigned long 
nd_blk_memremap_flags(struct nd_blk_region *ndbr) -{ - /* pmem mapping properties are private to libnvdimm */ - return ARCH_MEMREMAP_PMEM; -} -EXPORT_SYMBOL_GPL(nd_blk_memremap_flags); - struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping) { struct nvdimm *nvdimm = nd_mapping->nvdimm; @@ -312,8 +292,7 @@ static ssize_t flags_show(struct device *dev, { struct nvdimm *nvdimm = to_nvdimm(dev); - return sprintf(buf, "%s%s%s\n", - test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "", + return sprintf(buf, "%s%s\n", test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "", test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : ""); } @@ -612,8 +591,6 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, nvdimm->dimm_id = dimm_id; nvdimm->provider_data = provider_data; - if (noblk) - flags |= 1 << NDD_NOBLK; nvdimm->flags = flags; nvdimm->cmd_mask = cmd_mask; nvdimm->num_flush = num_flush; @@ -726,133 +703,6 @@ static unsigned long dpa_align(struct nd_region *nd_region) return nd_region->align / nd_region->ndr_mappings; } -int alias_dpa_busy(struct device *dev, void *data) -{ - resource_size_t map_end, blk_start, new; - struct blk_alloc_info *info = data; - struct nd_mapping *nd_mapping; - struct nd_region *nd_region; - struct nvdimm_drvdata *ndd; - struct resource *res; - unsigned long align; - int i; - - if (!is_memory(dev)) - return 0; - - nd_region = to_nd_region(dev); - for (i = 0; i < nd_region->ndr_mappings; i++) { - nd_mapping = &nd_region->mapping[i]; - if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) - break; - } - - if (i >= nd_region->ndr_mappings) - return 0; - - ndd = to_ndd(nd_mapping); - map_end = nd_mapping->start + nd_mapping->size - 1; - blk_start = nd_mapping->start; - - /* - * In the allocation case ->res is set to free space that we are - * looking to validate against PMEM aliasing collision rules - * (i.e. BLK is allocated after all aliased PMEM). - */ - if (info->res) { - if (info->res->start >= nd_mapping->start - && info->res->start < map_end) - /* pass */; - else - return 0; - } - - retry: - /* - * Find the free dpa from the end of the last pmem allocation to - * the end of the interleave-set mapping. - */ - align = dpa_align(nd_region); - if (!align) - return 0; - - for_each_dpa_resource(ndd, res) { - resource_size_t start, end; - - if (strncmp(res->name, "pmem", 4) != 0) - continue; - - start = ALIGN_DOWN(res->start, align); - end = ALIGN(res->end + 1, align) - 1; - if ((start >= blk_start && start < map_end) - || (end >= blk_start && end <= map_end)) { - new = max(blk_start, min(map_end, end) + 1); - if (new != blk_start) { - blk_start = new; - goto retry; - } - } - } - - /* update the free space range with the probed blk_start */ - if (info->res && blk_start > info->res->start) { - info->res->start = max(info->res->start, blk_start); - if (info->res->start > info->res->end) - info->res->end = info->res->start - 1; - return 1; - } - - info->available -= blk_start - nd_mapping->start; - - return 0; -} - -/** - * nd_blk_available_dpa - account the unused dpa of BLK region - * @nd_mapping: container of dpa-resource-root + labels - * - * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but - * we arrange for them to never start at an lower dpa than the last - * PMEM allocation in an aliased region. 
- */ -resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) -{ - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct blk_alloc_info info = { - .nd_mapping = nd_mapping, - .available = nd_mapping->size, - .res = NULL, - }; - struct resource *res; - unsigned long align; - - if (!ndd) - return 0; - - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); - - /* now account for busy blk allocations in unaliased dpa */ - align = dpa_align(nd_region); - if (!align) - return 0; - for_each_dpa_resource(ndd, res) { - resource_size_t start, end, size; - - if (strncmp(res->name, "blk", 3) != 0) - continue; - start = ALIGN_DOWN(res->start, align); - end = ALIGN(res->end + 1, align) - 1; - size = end - start + 1; - if (size >= info.available) - return 0; - info.available -= size; - } - - return info.available; -} - /** * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max * contiguous unallocated dpa range. @@ -900,24 +750,16 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa * @nd_mapping: container of dpa-resource-root + labels * @nd_region: constrain available space check to this reference region - * @overlap: calculate available space assuming this level of overlap * * Validate that a PMEM label, if present, aligns with the start of an - * interleave set and truncate the available size at the lowest BLK - * overlap point. - * - * The expectation is that this routine is called multiple times as it - * probes for the largest BLK encroachment for any single member DIMM of - * the interleave set. Once that value is determined the PMEM-limit for - * the set can be established. + * interleave set. */ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, - struct nd_mapping *nd_mapping, resource_size_t *overlap) + struct nd_mapping *nd_mapping) { - resource_size_t map_start, map_end, busy = 0, available, blk_start; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + resource_size_t map_start, map_end, busy = 0; struct resource *res; - const char *reason; unsigned long align; if (!ndd) @@ -929,46 +771,28 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, map_start = nd_mapping->start; map_end = map_start + nd_mapping->size - 1; - blk_start = max(map_start, map_end + 1 - *overlap); for_each_dpa_resource(ndd, res) { resource_size_t start, end; start = ALIGN_DOWN(res->start, align); end = ALIGN(res->end + 1, align) - 1; if (start >= map_start && start < map_end) { - if (strncmp(res->name, "blk", 3) == 0) - blk_start = min(blk_start, - max(map_start, start)); - else if (end > map_end) { - reason = "misaligned to iset"; - goto err; - } else - busy += end - start + 1; + if (end > map_end) { + nd_dbg_dpa(nd_region, ndd, res, + "misaligned to iset\n"); + return 0; + } + busy += end - start + 1; } else if (end >= map_start && end <= map_end) { - if (strncmp(res->name, "blk", 3) == 0) { - /* - * If a BLK allocation overlaps the start of - * PMEM the entire interleave set may now only - * be used for BLK. 
- */ - blk_start = map_start; - } else - busy += end - start + 1; + busy += end - start + 1; } else if (map_start > start && map_start < end) { /* total eclipse of the mapping */ busy += nd_mapping->size; - blk_start = map_start; } } - *overlap = map_end + 1 - blk_start; - available = blk_start - map_start; - if (busy < available) - return ALIGN_DOWN(available - busy, align); - return 0; - - err: - nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason); + if (busy < nd_mapping->size) + return ALIGN_DOWN(nd_mapping->size - busy, align); return 0; } @@ -999,7 +823,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd, /** * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id * @nvdimm: container of dpa-resource-root + labels - * @label_id: dpa resource name of the form {pmem|blk}- + * @label_id: dpa resource name of the form pmem- */ resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id) diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 8c972bcb2ac3..082253a3a956 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -334,8 +334,7 @@ char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid, { if (!label_id || !uuid) return NULL; - snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb", - flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid); + snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid); return label_id->id; } @@ -406,7 +405,6 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) return 0; /* no label, nothing to reserve */ for_each_clear_bit_le(slot, free, nslot) { - struct nvdimm *nvdimm = to_nvdimm(ndd->dev); struct nd_namespace_label *nd_label; struct nd_region *nd_region = NULL; struct nd_label_id label_id; @@ -421,8 +419,6 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) nsl_get_uuid(ndd, nd_label, &label_uuid); flags = nsl_get_flags(ndd, nd_label); - if (test_bit(NDD_NOBLK, &nvdimm->flags)) - flags &= ~NSLABEL_FLAG_LOCAL; nd_label_gen_id(&label_id, &label_uuid, flags); res = nvdimm_allocate_dpa(ndd, &label_id, nsl_get_dpa(ndd, nd_label), diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 198ef1df298b..0650fb4b9821 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -193,7 +193,7 @@ struct nd_namespace_label { /** * struct nd_label_id - identifier string for dpa allocation - * @id: "{blk|pmem}-" + * @id: "pmem-" */ struct nd_label_id { char id[ND_LABEL_ID_SIZE]; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index d1c190b02657..62b83b2e26e3 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -297,13 +297,11 @@ static int scan_free(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_label_id *label_id, resource_size_t n) { - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); int rc = 0; while (n) { struct resource *res, *last; - resource_size_t new_start; last = NULL; for_each_dpa_resource(ndd, res) @@ -321,16 +319,7 @@ static int scan_free(struct nd_region *nd_region, continue; } - /* - * Keep BLK allocations relegated to high DPA as much as - * possible - */ - if (is_blk) - new_start = res->start + n; - else - new_start = res->start; - - rc = adjust_resource(res, new_start, resource_size(res) - n); + rc = adjust_resource(res, res->start, resource_size(res) - n); if (rc == 0) res->flags |= DPA_RESOURCE_ADJUSTED; nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); @@ -372,20 +361,12 @@ static 
resource_size_t init_dpa_allocation(struct nd_label_id *label_id, struct nd_region *nd_region, struct nd_mapping *nd_mapping, resource_size_t n) { - bool is_blk = strncmp(label_id->id, "blk", 3) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - resource_size_t first_dpa; struct resource *res; int rc = 0; - /* allocate blk from highest dpa first */ - if (is_blk) - first_dpa = nd_mapping->start + nd_mapping->size - n; - else - first_dpa = nd_mapping->start; - /* first resource allocation for this label-id or dimm */ - res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n); + res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n); if (!res) rc = -EBUSY; @@ -416,7 +397,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, resource_size_t n, struct resource *valid) { bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; unsigned long align; align = nd_region->align / nd_region->ndr_mappings; @@ -429,21 +409,6 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, if (is_reserve) return; - if (!is_pmem) { - struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - struct nvdimm_bus *nvdimm_bus; - struct blk_alloc_info info = { - .nd_mapping = nd_mapping, - .available = nd_mapping->size, - .res = valid, - }; - - WARN_ON(!is_nd_blk(&nd_region->dev)); - nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); - return; - } - /* allocation needs to be contiguous, so this is all or nothing */ if (resource_size(valid) < n) goto invalid; @@ -471,7 +436,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, resource_size_t n) { resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct resource *res, *exist = NULL, valid; const resource_size_t to_allocate = n; @@ -569,10 +533,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, } if (strcmp(action, "allocate") == 0) { - /* BLK allocate bottom up */ - if (!is_pmem) - valid.start += available - allocate; - new_res = nvdimm_allocate_dpa(ndd, label_id, valid.start, allocate); if (!new_res) @@ -608,12 +568,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, return 0; } - /* - * If we allocated nothing in the BLK case it may be because we are in - * an initial "pmem-reserve pass". Only do an initial BLK allocation - * when none of the DPA space is reserved. 
- */ - if ((is_pmem || !ndd->dpa.child) && n == to_allocate) + if (n == to_allocate) return init_dpa_allocation(label_id, nd_region, nd_mapping, n); return n; } @@ -672,7 +627,7 @@ int __reserve_free_pmem(struct device *dev, void *data) if (nd_mapping->nvdimm != nvdimm) continue; - n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); + n = nd_pmem_available_dpa(nd_region, nd_mapping); if (n == 0) return 0; rem = scan_allocate(nd_region, nd_mapping, &label_id, n); @@ -697,19 +652,6 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus, nvdimm_free_dpa(ndd, res); } -static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, - struct nd_mapping *nd_mapping) -{ - struct nvdimm *nvdimm = nd_mapping->nvdimm; - int rc; - - rc = device_for_each_child(&nvdimm_bus->dev, nvdimm, - __reserve_free_pmem); - if (rc) - release_free_pmem(nvdimm_bus, nd_mapping); - return rc; -} - /** * grow_dpa_allocation - for each dimm allocate n bytes for @label_id * @nd_region: the set of dimms to allocate @n more bytes from @@ -726,37 +668,14 @@ static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, static int grow_dpa_allocation(struct nd_region *nd_region, struct nd_label_id *label_id, resource_size_t n) { - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; int i; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; resource_size_t rem = n; - int rc, j; - - /* - * In the BLK case try once with all unallocated PMEM - * reserved, and once without - */ - for (j = is_pmem; j < 2; j++) { - bool blk_only = j == 0; - - if (blk_only) { - rc = reserve_free_pmem(nvdimm_bus, nd_mapping); - if (rc) - return rc; - } - rem = scan_allocate(nd_region, nd_mapping, - label_id, rem); - if (blk_only) - release_free_pmem(nvdimm_bus, nd_mapping); - - /* try again and allow encroachments into PMEM */ - if (rem == 0) - break; - } + int rc; + rem = scan_allocate(nd_region, nd_mapping, label_id, rem); dev_WARN_ONCE(&nd_region->dev, rem, "allocation underrun: %#llx of %#llx bytes\n", (unsigned long long) n - rem, @@ -869,8 +788,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) ndd = to_ndd(nd_mapping); /* - * All dimms in an interleave set, or the base dimm for a blk - * region, need to be enabled for the size to be changed. + * All dimms in an interleave set, need to be enabled + * for the size to be changed. */ if (!ndd) return -ENXIO; @@ -1169,9 +1088,6 @@ static ssize_t resource_show(struct device *dev, } static DEVICE_ATTR_ADMIN_RO(resource); -static const unsigned long blk_lbasize_supported[] = { 512, 520, 528, - 4096, 4104, 4160, 4224, 0 }; - static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 }; static ssize_t sector_size_show(struct device *dev, @@ -1823,10 +1739,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, /* * Fix up each mapping's 'labels' to have the validated pmem label for * that position at labels[0], and NULL at labels[1]. In the process, - * check that the namespace aligns with interleave-set. We know - * that it does not overlap with any blk namespaces by virtue of - * the dimm being enabled (i.e. nd_label_reserve_dpa() - * succeeded). + * check that the namespace aligns with interleave-set. 
*/ nsl_get_uuid(ndd, nd_label, &uuid); rc = select_pmem_id(nd_region, &uuid); @@ -1931,8 +1844,7 @@ void nd_region_create_ns_seed(struct nd_region *nd_region) * disabled until memory becomes available */ if (!nd_region->ns_seed) - dev_err(&nd_region->dev, "failed to create %s namespace\n", - is_nd_blk(&nd_region->dev) ? "blk" : "pmem"); + dev_err(&nd_region->dev, "failed to create namespace\n"); else nd_device_register(nd_region->ns_seed); } @@ -2028,16 +1940,9 @@ static struct device **scan_labels(struct nd_region *nd_region) list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; struct device **__devs; - u32 flags; if (!nd_label) continue; - flags = nsl_get_flags(ndd, nd_label); - if (is_nd_blk(&nd_region->dev) - == !!(flags & NSLABEL_FLAG_LOCAL)) - /* pass, region matches label type */; - else - continue; /* skip labels that describe extents outside of the region */ if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || @@ -2073,9 +1978,8 @@ static struct device **scan_labels(struct nd_region *nd_region) } - dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n", - count, is_nd_blk(&nd_region->dev) - ? "blk" : "pmem", count == 1 ? "" : "s"); + dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, + count == 1 ? "" : "s"); if (count == 0) { struct nd_namespace_pmem *nspm; @@ -2226,12 +2130,6 @@ static int init_active_labels(struct nd_region *nd_region) if (!label_ent) break; label = nd_label_active(ndd, j); - if (test_bit(NDD_NOBLK, &nvdimm->flags)) { - u32 flags = nsl_get_flags(ndd, label); - - flags &= ~NSLABEL_FLAG_LOCAL; - nsl_set_flags(ndd, label, flags); - } label_ent->label = label; mutex_lock(&nd_mapping->lock); @@ -2275,7 +2173,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) devs = create_namespace_io(nd_region); break; case ND_DEVICE_NAMESPACE_PMEM: - case ND_DEVICE_NAMESPACE_BLK: devs = create_namespaces(nd_region); break; default: diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index e4af0719cf33..17febf9ac911 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -82,30 +82,12 @@ static inline void nvdimm_security_overwrite_query(struct work_struct *work) } #endif -/** - * struct blk_alloc_info - tracking info for BLK dpa scanning - * @nd_mapping: blk region mapping boundaries - * @available: decremented in alias_dpa_busy as aliased PMEM is scanned - * @busy: decremented in blk_dpa_busy to account for ranges already - * handled by alias_dpa_busy - * @res: alias_dpa_busy interprets this a free space range that needs to - * be truncated to the valid BLK allocation starting DPA, blk_dpa_busy - * treats it as a busy range that needs the aliased PMEM ranges - * truncated. 
- */ -struct blk_alloc_info { - struct nd_mapping *nd_mapping; - resource_size_t available, busy; - struct resource *res; -}; - bool is_nvdimm(struct device *dev); bool is_nd_pmem(struct device *dev); bool is_nd_volatile(struct device *dev); -bool is_nd_blk(struct device *dev); static inline bool is_nd_region(struct device *dev) { - return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev); + return is_nd_pmem(dev) || is_nd_volatile(dev); } static inline bool is_memory(struct device *dev) { @@ -142,14 +124,12 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping); resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region); resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, - struct nd_mapping *nd_mapping, resource_size_t *overlap); -resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); + struct nd_mapping *nd_mapping); resource_size_t nd_region_available_dpa(struct nd_region *nd_region); int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, resource_size_t size); resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id); -int alias_dpa_busy(struct device *dev, void *data); int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); void get_ndd(struct nvdimm_drvdata *ndd); resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 8391bf2729bc..ec5219680092 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -295,9 +295,6 @@ static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd, return nd_label->efi.uuid; } -bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, - struct nd_namespace_label *nd_label, - u64 isetcookie); bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, guid_t *guid); enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, @@ -437,14 +434,6 @@ static inline bool nsl_validate_nlabel(struct nd_region *nd_region, return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings; } -struct nd_blk_region { - int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw); - void *blk_provider_data; - struct nd_region nd_region; -}; - /* * Lookup next in the repeating sequence of 01, 10, and 11. 
*/ @@ -672,7 +661,6 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, return -ENXIO; } #endif -int nd_blk_region_init(struct nd_region *nd_region); int nd_region_activate(struct nd_region *nd_region); static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len) diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index e0c34120df37..188560b1c110 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -15,6 +15,10 @@ static int nd_region_probe(struct device *dev) static unsigned long once; struct nd_region_data *ndrd; struct nd_region *nd_region = to_nd_region(dev); + struct range range = { + .start = nd_region->ndr_start, + .end = nd_region->ndr_start + nd_region->ndr_size - 1, + }; if (nd_region->num_lanes > num_online_cpus() && nd_region->num_lanes < num_possible_cpus() @@ -30,25 +34,13 @@ static int nd_region_probe(struct device *dev) if (rc) return rc; - rc = nd_blk_region_init(nd_region); - if (rc) - return rc; - - if (is_memory(&nd_region->dev)) { - struct range range = { - .start = nd_region->ndr_start, - .end = nd_region->ndr_start + nd_region->ndr_size - 1, - }; - - if (devm_init_badblocks(dev, &nd_region->bb)) - return -ENODEV; - nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd, - "badblocks"); - if (!nd_region->bb_state) - dev_warn(&nd_region->dev, - "'badblocks' notification disabled\n"); - nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range); - } + if (devm_init_badblocks(dev, &nd_region->bb)) + return -ENODEV; + nd_region->bb_state = + sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks"); + if (!nd_region->bb_state) + dev_warn(dev, "'badblocks' notification disabled\n"); + nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range); rc = nd_region_register_namespaces(nd_region, &err); if (rc < 0) @@ -158,4 +150,3 @@ void nd_region_exit(void) } MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM); -MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 70ad891a76ba..0cb274c2b508 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -134,10 +134,7 @@ static void nd_region_release(struct device *dev) } free_percpu(nd_region->lane); memregion_free(nd_region->id); - if (is_nd_blk(dev)) - kfree(to_nd_blk_region(dev)); - else - kfree(nd_region); + kfree(nd_region); } struct nd_region *to_nd_region(struct device *dev) @@ -157,33 +154,12 @@ struct device *nd_region_dev(struct nd_region *nd_region) } EXPORT_SYMBOL_GPL(nd_region_dev); -struct nd_blk_region *to_nd_blk_region(struct device *dev) -{ - struct nd_region *nd_region = to_nd_region(dev); - - WARN_ON(!is_nd_blk(dev)); - return container_of(nd_region, struct nd_blk_region, nd_region); -} -EXPORT_SYMBOL_GPL(to_nd_blk_region); - void *nd_region_provider_data(struct nd_region *nd_region) { return nd_region->provider_data; } EXPORT_SYMBOL_GPL(nd_region_provider_data); -void *nd_blk_region_provider_data(struct nd_blk_region *ndbr) -{ - return ndbr->blk_provider_data; -} -EXPORT_SYMBOL_GPL(nd_blk_region_provider_data); - -void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data) -{ - ndbr->blk_provider_data = data; -} -EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); - /** * nd_region_to_nstype() - region to an integer namespace type * @nd_region: region-device to interrogate @@ -208,8 +184,6 @@ int nd_region_to_nstype(struct nd_region *nd_region) return ND_DEVICE_NAMESPACE_PMEM; else return ND_DEVICE_NAMESPACE_IO; - } else if 
(is_nd_blk(&nd_region->dev)) { - return ND_DEVICE_NAMESPACE_BLK; } return 0; @@ -332,14 +306,12 @@ static DEVICE_ATTR_RO(set_cookie); resource_size_t nd_region_available_dpa(struct nd_region *nd_region) { - resource_size_t blk_max_overlap = 0, available, overlap; + resource_size_t available; int i; WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); - retry: available = 0; - overlap = blk_max_overlap; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); @@ -348,15 +320,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) if (!ndd) return 0; - if (is_memory(&nd_region->dev)) { - available += nd_pmem_available_dpa(nd_region, - nd_mapping, &overlap); - if (overlap > blk_max_overlap) { - blk_max_overlap = overlap; - goto retry; - } - } else if (is_nd_blk(&nd_region->dev)) - available += nd_blk_available_dpa(nd_region); + available += nd_pmem_available_dpa(nd_region, nd_mapping); } return available; @@ -364,26 +328,17 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) { - resource_size_t available = 0; + resource_size_t avail = 0; int i; - if (is_memory(&nd_region->dev)) - available = PHYS_ADDR_MAX; - WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; - if (is_memory(&nd_region->dev)) - available = min(available, - nd_pmem_max_contiguous_dpa(nd_region, - nd_mapping)); - else if (is_nd_blk(&nd_region->dev)) - available += nd_blk_available_dpa(nd_region); + avail = min_not_zero(avail, nd_pmem_max_contiguous_dpa( + nd_region, nd_mapping)); } - if (is_memory(&nd_region->dev)) - return available * nd_region->ndr_mappings; - return available; + return avail * nd_region->ndr_mappings; } static ssize_t available_size_show(struct device *dev, @@ -693,9 +648,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) && a != &dev_attr_available_size.attr) return a->mode; - if ((type == ND_DEVICE_NAMESPACE_PMEM - || type == ND_DEVICE_NAMESPACE_BLK) - && a == &dev_attr_available_size.attr) + if (type == ND_DEVICE_NAMESPACE_PMEM && + a == &dev_attr_available_size.attr) return a->mode; else if (is_memory(dev) && nd_set) return a->mode; @@ -828,12 +782,6 @@ static const struct attribute_group *nd_region_attribute_groups[] = { NULL, }; -static const struct device_type nd_blk_device_type = { - .name = "nd_blk", - .release = nd_region_release, - .groups = nd_region_attribute_groups, -}; - static const struct device_type nd_pmem_device_type = { .name = "nd_pmem", .release = nd_region_release, @@ -851,11 +799,6 @@ bool is_nd_pmem(struct device *dev) return dev ? dev->type == &nd_pmem_device_type : false; } -bool is_nd_blk(struct device *dev) -{ - return dev ? dev->type == &nd_blk_device_type : false; -} - bool is_nd_volatile(struct device *dev) { return dev ? 
dev->type == &nd_volatile_device_type : false; @@ -929,22 +872,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) nvdimm_bus_unlock(dev); } -int nd_blk_region_init(struct nd_region *nd_region) -{ - struct device *dev = &nd_region->dev; - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - - if (!is_nd_blk(dev)) - return 0; - - if (nd_region->ndr_mappings < 1) { - dev_dbg(dev, "invalid BLK region\n"); - return -ENXIO; - } - - return to_nd_blk_region(dev)->enable(nvdimm_bus, dev); -} - /** * nd_region_acquire_lane - allocate and lock a lane * @nd_region: region id and number of lanes possible @@ -1007,24 +934,10 @@ EXPORT_SYMBOL(nd_region_release_lane); static unsigned long default_align(struct nd_region *nd_region) { unsigned long align; - int i, mappings; u32 remainder; + int mappings; - if (is_nd_blk(&nd_region->dev)) - align = PAGE_SIZE; - else - align = MEMREMAP_COMPAT_ALIGN_MAX; - - for (i = 0; i < nd_region->ndr_mappings; i++) { - struct nd_mapping *nd_mapping = &nd_region->mapping[i]; - struct nvdimm *nvdimm = nd_mapping->nvdimm; - - if (test_bit(NDD_ALIASING, &nvdimm->flags)) { - align = MEMREMAP_COMPAT_ALIGN_MAX; - break; - } - } - + align = MEMREMAP_COMPAT_ALIGN_MAX; if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) align = PAGE_SIZE; @@ -1042,7 +955,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, { struct nd_region *nd_region; struct device *dev; - void *region_buf; unsigned int i; int ro = 0; @@ -1060,36 +972,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, if (test_bit(NDD_UNARMED, &nvdimm->flags)) ro = 1; - if (test_bit(NDD_NOBLK, &nvdimm->flags) - && dev_type == &nd_blk_device_type) { - dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n", - caller, dev_name(&nvdimm->dev), i); - return NULL; - } } - if (dev_type == &nd_blk_device_type) { - struct nd_blk_region_desc *ndbr_desc; - struct nd_blk_region *ndbr; - - ndbr_desc = to_blk_region_desc(ndr_desc); - ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping) - * ndr_desc->num_mappings, - GFP_KERNEL); - if (ndbr) { - nd_region = &ndbr->nd_region; - ndbr->enable = ndbr_desc->enable; - ndbr->do_io = ndbr_desc->do_io; - } - region_buf = ndbr; - } else { - nd_region = kzalloc(struct_size(nd_region, mapping, - ndr_desc->num_mappings), - GFP_KERNEL); - region_buf = nd_region; - } + nd_region = + kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings), + GFP_KERNEL); - if (!region_buf) + if (!nd_region) return NULL; nd_region->id = memregion_alloc(GFP_KERNEL); if (nd_region->id < 0) @@ -1153,7 +1042,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, err_percpu: memregion_free(nd_region->id); err_id: - kfree(region_buf); + kfree(nd_region); return NULL; } @@ -1166,17 +1055,6 @@ struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, } EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create); -struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, - struct nd_region_desc *ndr_desc) -{ - if (ndr_desc->num_mappings > 1) - return NULL; - ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES); - return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type, - __func__); -} -EXPORT_SYMBOL_GPL(nvdimm_blk_region_create); - struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc) { @@ -1201,7 +1079,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio) } /** * nvdimm_flush - flush any posted 
write queues between the cpu and pmem media - * @nd_region: blk or interleaved pmem region + * @nd_region: interleaved pmem region */ int generic_nvdimm_flush(struct nd_region *nd_region) { @@ -1234,7 +1112,7 @@ EXPORT_SYMBOL_GPL(nvdimm_flush); /** * nvdimm_has_flush - determine write flushing requirements - * @nd_region: blk or interleaved pmem region + * @nd_region: interleaved pmem region * * Returns 1 if writes require flushing * Returns 0 if writes do not require flushing diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 7074aa9af525..0d61e07b6827 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -25,8 +25,6 @@ struct badrange { }; enum { - /* when a dimm supports both PMEM and BLK access a label is required */ - NDD_ALIASING = 0, /* unarmed memory devices may not persist writes */ NDD_UNARMED = 1, /* locked memory devices should not be accessed */ @@ -35,8 +33,6 @@ enum { NDD_SECURITY_OVERWRITE = 3, /* tracking whether or not there is a pending device reference */ NDD_WORK_PENDING = 4, - /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ - NDD_NOBLK = 5, /* dimm supports namespace labels */ NDD_LABELING = 6, @@ -140,21 +136,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, } struct nvdimm_bus; -struct module; -struct nd_blk_region; -struct nd_blk_region_desc { - int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); - int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw); - struct nd_region_desc ndr_desc; -}; - -static inline struct nd_blk_region_desc *to_blk_region_desc( - struct nd_region_desc *ndr_desc) -{ - return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc); - -} /* * Note that separate bits for locked + unlocked are defined so that @@ -257,7 +238,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); struct device *nd_region_dev(struct nd_region *nd_region); -struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); const char *nvdimm_name(struct nvdimm *nvdimm); @@ -295,10 +275,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc); void *nd_region_provider_data(struct nd_region *nd_region); -void *nd_blk_region_provider_data(struct nd_blk_region *ndbr); -void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data); -struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr); -unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 8cf1e4884fd5..17e02b64ea2e 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -189,7 +189,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd) #define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */ #define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */ #define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */ -#define ND_DEVICE_NAMESPACE_BLK 6 /* 
BLK namespace (may alias with PMEM) */ #define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */ enum nd_driver_flags { @@ -198,7 +197,6 @@ enum nd_driver_flags { ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK, ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO, ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM, - ND_DRIVER_NAMESPACE_BLK = 1 << ND_DEVICE_NAMESPACE_BLK, ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM, }; diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c index 3ca7c32e9362..4d1a947367f9 100644 --- a/tools/testing/nvdimm/test/ndtest.c +++ b/tools/testing/nvdimm/test/ndtest.c @@ -338,62 +338,6 @@ static int ndtest_ctl(struct nvdimm_bus_descriptor *nd_desc, return 0; } -static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, - void *iobuf, u64 len, int rw) -{ - struct ndtest_dimm *dimm = ndbr->blk_provider_data; - struct ndtest_blk_mmio *mmio = dimm->mmio; - struct nd_region *nd_region = &ndbr->nd_region; - unsigned int lane; - - if (!mmio) - return -ENOMEM; - - lane = nd_region_acquire_lane(nd_region); - if (rw) - memcpy(mmio->base + dpa, iobuf, len); - else { - memcpy(iobuf, mmio->base + dpa, len); - arch_invalidate_pmem(mmio->base + dpa, len); - } - - nd_region_release_lane(nd_region, lane); - - return 0; -} - -static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus, - struct device *dev) -{ - struct nd_blk_region *ndbr = to_nd_blk_region(dev); - struct nvdimm *nvdimm; - struct ndtest_dimm *dimm; - struct ndtest_blk_mmio *mmio; - - nvdimm = nd_blk_region_to_dimm(ndbr); - dimm = nvdimm_provider_data(nvdimm); - - nd_blk_region_set_provider_data(ndbr, dimm); - dimm->blk_region = to_nd_region(dev); - - mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL); - if (!mmio) - return -ENOMEM; - - mmio->base = (void __iomem *) devm_nvdimm_memremap( - dev, dimm->address, 12, nd_blk_memremap_flags(ndbr)); - if (!mmio->base) { - dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm)); - return -ENOMEM; - } - mmio->size = dimm->size; - mmio->base_offset = 0; - - dimm->mmio = mmio; - - return 0; -} - static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr) { int i; @@ -523,17 +467,16 @@ static int ndtest_create_region(struct ndtest_priv *p, struct ndtest_region *region) { struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING]; - struct nd_blk_region_desc ndbr_desc; + struct nd_region_desc *ndr_desc, _ndr_desc; struct nd_interleave_set *nd_set; - struct nd_region_desc *ndr_desc; struct resource res; int i, ndimm = region->mapping[0].dimm; u64 uuid[2]; memset(&res, 0, sizeof(res)); memset(&mappings, 0, sizeof(mappings)); - memset(&ndbr_desc, 0, sizeof(ndbr_desc)); - ndr_desc = &ndbr_desc.ndr_desc; + memset(&_ndr_desc, 0, sizeof(_ndr_desc)); + ndr_desc = &_ndr_desc; if (!ndtest_alloc_resource(p, region->size, &res.start)) return -ENOMEM; @@ -857,10 +800,8 @@ static int ndtest_dimm_register(struct ndtest_priv *priv, struct device *dev = &priv->pdev.dev; unsigned long dimm_flags = dimm->flags; - if (dimm->num_formats > 1) { - set_bit(NDD_ALIASING, &dimm_flags); + if (dimm->num_formats > 1) set_bit(NDD_LABELING, &dimm_flags); - } if (dimm->flags & PAPR_PMEM_UNARMED_MASK) set_bit(NDD_UNARMED, &dimm_flags); -- cgit v1.2.3-71-gd317 From 8109517b394e6deab5fd21cc5460e82ffed229c6 Mon Sep 17 00:00:00 2001 From: Arnaud Pouliquen Date: Mon, 24 Jan 2022 11:25:24 +0100 Subject: rpmsg: ctrl: Introduce new RPMSG_CREATE/RELEASE_DEV_IOCTL controls Allow the user space application to create 
and release an rpmsg device by adding RPMSG_CREATE_DEV_IOCTL and RPMSG_RELEASE_DEV_IOCTL ioctls to the /dev/rpmsg_ctrl interface. The RPMSG_CREATE_DEV_IOCTL ioctl can be used to instantiate a local rpmsg device. Depending on the back-end implementation, the associated rpmsg driver is probed and an NS announcement can be sent to the remote processor. The RPMSG_RELEASE_DEV_IOCTL allows the user application to release an rpmsg device created either by the remote processor or with the RPMSG_CREATE_DEV_IOCTL call. Depending on the back-end implementation, the associated rpmsg driver is removed and an NS destroy message can be sent to the remote processor. Suggested-by: Mathieu Poirier Signed-off-by: Arnaud Pouliquen Reviewed-by: Mathieu Poirier Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220124102524.295783-12-arnaud.pouliquen@foss.st.com --- drivers/rpmsg/rpmsg_ctrl.c | 36 ++++++++++++++++++++++++++++++++---- include/uapi/linux/rpmsg.h | 10 ++++++++++ 2 files changed, 42 insertions(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/rpmsg/rpmsg_ctrl.c b/drivers/rpmsg/rpmsg_ctrl.c index f43b5e4dbb4c..107da70fdbaa 100644 --- a/drivers/rpmsg/rpmsg_ctrl.c +++ b/drivers/rpmsg/rpmsg_ctrl.c @@ -43,11 +43,13 @@ static DEFINE_IDA(rpmsg_minor_ida); * @rpdev: underlaying rpmsg device * @cdev: cdev for the ctrl device * @dev: device for the ctrl device + * @ctrl_lock: serialize the ioctls. */ struct rpmsg_ctrldev { struct rpmsg_device *rpdev; struct cdev cdev; struct device dev; + struct mutex ctrl_lock; }; static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp) @@ -76,9 +78,8 @@ static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd, void __user *argp = (void __user *)arg; struct rpmsg_endpoint_info eptinfo; struct rpmsg_channel_info chinfo; - - if (cmd != RPMSG_CREATE_EPT_IOCTL) - return -EINVAL; + struct rpmsg_device *rpdev; + int ret = 0; if (copy_from_user(&eptinfo, argp, sizeof(eptinfo))) return -EFAULT; @@ -88,7 +89,33 @@ static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd, chinfo.src = eptinfo.src; chinfo.dst = eptinfo.dst; - return rpmsg_chrdev_eptdev_create(ctrldev->rpdev, &ctrldev->dev, chinfo); + mutex_lock(&ctrldev->ctrl_lock); + switch (cmd) { + case RPMSG_CREATE_EPT_IOCTL: + ret = rpmsg_chrdev_eptdev_create(ctrldev->rpdev, &ctrldev->dev, chinfo); + break; + + case RPMSG_CREATE_DEV_IOCTL: + rpdev = rpmsg_create_channel(ctrldev->rpdev, &chinfo); + if (!rpdev) { + dev_err(&ctrldev->dev, "failed to create %s channel\n", chinfo.name); + ret = -ENXIO; + } + break; + + case RPMSG_RELEASE_DEV_IOCTL: + ret = rpmsg_release_channel(ctrldev->rpdev, &chinfo); + if (ret) + dev_err(&ctrldev->dev, "failed to release %s channel (%d)\n", + chinfo.name, ret); + break; + + default: + ret = -EINVAL; + } + mutex_unlock(&ctrldev->ctrl_lock); + + return ret; }; static const struct file_operations rpmsg_ctrldev_fops = { @@ -125,6 +152,7 @@ static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev) dev->parent = &rpdev->dev; dev->class = rpmsg_class; + mutex_init(&ctrldev->ctrl_lock); cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops); ctrldev->cdev.owner = THIS_MODULE; diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h index f5ca8740f3fb..1637e68177d9 100644 --- a/include/uapi/linux/rpmsg.h +++ b/include/uapi/linux/rpmsg.h @@ -33,4 +33,14 @@ struct rpmsg_endpoint_info { */ #define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) +/** + * Instantiate a new local rpmsg service device. 
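+ * + * A minimal user-space sketch (illustrative only: the control device + * path and the channel parameters below are assumptions, not part of + * this ABI): + * + * struct rpmsg_endpoint_info info = { .name = "demo", .src = 0, .dst = 0 }; + * int fd = open("/dev/rpmsg_ctrl0", O_RDWR); + * int ret = ioctl(fd, RPMSG_CREATE_DEV_IOCTL, &info); + * + * On success the matching rpmsg driver is probed; the symmetric + * RPMSG_RELEASE_DEV_IOCTL call with the same structure releases the + * device again.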
+ */ +#define RPMSG_CREATE_DEV_IOCTL _IOW(0xb5, 0x3, struct rpmsg_endpoint_info) + +/** + * Release a local rpmsg device. + */ +#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info) + #endif -- cgit v1.2.3-71-gd317 From b5fdf66f6eb2560784c6f60131dc567de06267dc Mon Sep 17 00:00:00 2001 From: Dave Wysochanski Date: Tue, 1 Mar 2022 14:37:27 -0500 Subject: NFS: Remove remaining dfprintks related to fscache and remove NFSDBG_FSCACHE The fscache cookie APIs, including fscache_acquire_cookie() and fscache_relinquish_cookie(), now have very good tracing. Thus, there is no real need for dfprintks in the NFS fscache interface. With all dfprintks now removed from the NFS fscache interface, mark the NFSDBG_FSCACHE define as unused. Signed-off-by: Dave Wysochanski Signed-off-by: Trond Myklebust --- fs/nfs/fscache.c | 10 ---------- include/uapi/linux/nfs_fs.h | 2 +- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 841b69aef189..4dee53ceb941 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -21,8 +21,6 @@ #include "fscache.h" #include "nfstrace.h" -#define NFSDBG_FACILITY NFSDBG_FSCACHE - #define NFS_MAX_KEY_LEN 1000 static bool nfs_append_int(char *key, int *_len, unsigned long long x) @@ -129,8 +127,6 @@ int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int u vcookie = fscache_acquire_volume(key, NULL, /* preferred_cache */ NULL, 0 /* coherency_data */); - dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n", - nfss, vcookie); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(key); @@ -153,9 +149,6 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) { struct nfs_server *nfss = NFS_SB(sb); - dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n", - nfss, nfss->fscache); - fscache_relinquish_volume(nfss->fscache, NULL, false); nfss->fscache = NULL; kfree(nfss->fscache_uniq); @@ -193,8 +186,6 @@ void nfs_fscache_clear_inode(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); struct fscache_cookie *cookie = nfs_i_fscache(inode); - dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie); - fscache_relinquish_cookie(cookie, false); nfsi->fscache = NULL; } @@ -229,7 +220,6 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp) fscache_use_cookie(cookie, open_for_write); if (open_for_write) { - dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi); nfs_fscache_update_auxdata(&auxdata, inode); fscache_invalidate(cookie, &auxdata, i_size_read(inode), FSCACHE_INVAL_DIO_WRITE); diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h index 3afe3767c55d..ae0de165c014 100644 --- a/include/uapi/linux/nfs_fs.h +++ b/include/uapi/linux/nfs_fs.h @@ -52,7 +52,7 @@ #define NFSDBG_CALLBACK 0x0100 #define NFSDBG_CLIENT 0x0200 #define NFSDBG_MOUNT 0x0400 -#define NFSDBG_FSCACHE 0x0800 +#define NFSDBG_FSCACHE 0x0800 /* unused */ #define NFSDBG_PNFS 0x1000 #define NFSDBG_PNFS_LD 0x2000 #define NFSDBG_STATE 0x4000 -- cgit v1.2.3-71-gd317 From 2c7d2a230237e7c43fa067d695937b7e484bb92a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 15 Dec 2021 15:39:58 -0500 Subject: btrfs: add definition for EXTENT_TREE_V2 This adds the initial definition of the EXTENT_TREE_V2 incompat feature flag. This also hides the support behind CONFIG_BTRFS_DEBUG. THIS IS AN IN-DEVELOPMENT FORMAT CHANGE, DO NOT USE UNLESS YOU ARE A DEVELOPER OR A TESTER. 
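For testers: with the sysfs change below applied, the flag is exported under /sys/fs/btrfs/features/, so kernel support can be probed from user space with something as simple as the following sketch (the helper name is arbitrary; the attribute only exists on CONFIG_BTRFS_DEBUG kernels carrying this patch):

    #include <unistd.h>

    /* Nonzero if the running kernel advertises extent tree v2 support. */
    static int extent_tree_v2_supported(void)
    {
            return access("/sys/fs/btrfs/features/extent_tree_v2", F_OK) == 0;
    }
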
The format is in flux and will be added in stages, any fs will need to be re-made between updates to the format. Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 21 +++++++++++++++++++++ fs/btrfs/sysfs.c | 5 ++++- include/uapi/linux/btrfs.h | 1 + 3 files changed, 26 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ebb2d109e8bb..54424b341132 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -300,6 +300,26 @@ static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE); #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL +#ifdef CONFIG_BTRFS_DEBUG +/* + * Extent tree v2 supported only with CONFIG_BTRFS_DEBUG + */ +#define BTRFS_FEATURE_INCOMPAT_SUPP \ + (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ + BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ + BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ + BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ + BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ + BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \ + BTRFS_FEATURE_INCOMPAT_RAID56 | \ + BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ + BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ + BTRFS_FEATURE_INCOMPAT_NO_HOLES | \ + BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \ + BTRFS_FEATURE_INCOMPAT_RAID1C34 | \ + BTRFS_FEATURE_INCOMPAT_ZONED | \ + BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2) +#else #define BTRFS_FEATURE_INCOMPAT_SUPP \ (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ @@ -314,6 +334,7 @@ static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE); BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \ BTRFS_FEATURE_INCOMPAT_RAID1C34 | \ BTRFS_FEATURE_INCOMPAT_ZONED) +#endif #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \ (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index beb7f72d50b8..5f4812fd8b50 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -283,9 +283,11 @@ BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES); BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID); BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE); BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34); -/* Remove once support for zoned allocation is feature complete */ #ifdef CONFIG_BTRFS_DEBUG +/* Remove once support for zoned allocation is feature complete */ BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED); +/* Remove once support for extent tree v2 is feature complete */ +BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2); #endif #ifdef CONFIG_FS_VERITY BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY); @@ -314,6 +316,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = { BTRFS_FEAT_ATTR_PTR(raid1c34), #ifdef CONFIG_BTRFS_DEBUG BTRFS_FEAT_ATTR_PTR(zoned), + BTRFS_FEAT_ATTR_PTR(extent_tree_v2), #endif #ifdef CONFIG_FS_VERITY BTRFS_FEAT_ATTR_PTR(verity), diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 738619994e26..1cb1a3860f1d 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -309,6 +309,7 @@ struct btrfs_ioctl_fs_info_args { #define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10) #define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11) #define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12) +#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13) struct btrfs_ioctl_feature_flags { __u64 compat_flags; -- cgit v1.2.3-71-gd317 From 9c54e80ddc6bd89596a4046d451908700476fd14 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 15 Dec 2021 15:40:07 -0500 Subject: btrfs: add code to support the block group root 
This code adds the on disk structures for the block group root, which will hold the block group items for extent tree v2. Signed-off-by: Josef Bacik Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 27 ++++++++++++++++++++++- fs/btrfs/disk-io.c | 49 +++++++++++++++++++++++++++++++++++------ fs/btrfs/disk-io.h | 2 ++ fs/btrfs/print-tree.c | 1 + include/trace/events/btrfs.h | 1 + include/uapi/linux/btrfs_tree.h | 3 +++ 6 files changed, 75 insertions(+), 8 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 54424b341132..f460a7bb9ae8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -274,8 +274,14 @@ struct btrfs_super_block { /* the UUID written into btree blocks */ u8 metadata_uuid[BTRFS_FSID_SIZE]; + /* Extent tree v2 */ + __le64 block_group_root; + __le64 block_group_root_generation; + u8 block_group_root_level; + /* future expansion */ - __le64 reserved[28]; + u8 reserved8[7]; + __le64 reserved[25]; u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE]; struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS]; @@ -657,6 +663,7 @@ struct btrfs_fs_info { struct btrfs_root *quota_root; struct btrfs_root *uuid_root; struct btrfs_root *data_reloc_root; + struct btrfs_root *block_group_root; /* the log root tree is a directory of all the other log roots */ struct btrfs_root *log_root_tree; @@ -2349,6 +2356,17 @@ BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, num_devices, 64); +/* + * For extent tree v2 we overload the extent root with the block group root, as + * we will have multiple extent roots. + */ +BTRFS_SETGET_STACK_FUNCS(backup_block_group_root, struct btrfs_root_backup, + extent_root, 64); +BTRFS_SETGET_STACK_FUNCS(backup_block_group_root_gen, struct btrfs_root_backup, + extent_root_gen, 64); +BTRFS_SETGET_STACK_FUNCS(backup_block_group_root_level, + struct btrfs_root_backup, extent_root_level, 8); + /* struct btrfs_balance_item */ BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); @@ -2483,6 +2501,13 @@ BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, uuid_tree_generation, 64); +BTRFS_SETGET_STACK_FUNCS(super_block_group_root, struct btrfs_super_block, + block_group_root, 64); +BTRFS_SETGET_STACK_FUNCS(super_block_group_root_generation, + struct btrfs_super_block, + block_group_root_generation, 64); +BTRFS_SETGET_STACK_FUNCS(super_block_group_root_level, struct btrfs_super_block, + block_group_root_level, 8); int btrfs_super_csum_size(const struct btrfs_super_block *s); const char *btrfs_super_csum_name(u16 csum_type); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e014cb8413c7..fe1349737edb 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1727,6 +1727,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) btrfs_put_root(fs_info->uuid_root); btrfs_put_root(fs_info->fs_root); btrfs_put_root(fs_info->data_reloc_root); + btrfs_put_root(fs_info->block_group_root); btrfs_check_leaked_roots(fs_info); btrfs_extent_buffer_leak_debug_check(fs_info); kfree(fs_info->super_copy); @@ -2095,7 +2096,6 @@ static void backup_super_roots(struct btrfs_fs_info *info) { const int next_backup = info->backup_root_index; struct btrfs_root_backup *root_backup; - struct btrfs_root *extent_root = btrfs_extent_root(info, 0); struct 
btrfs_root *csum_root = btrfs_csum_root(info, 0); root_backup = info->super_for_commit->super_roots + next_backup; @@ -2121,11 +2121,23 @@ static void backup_super_roots(struct btrfs_fs_info *info) btrfs_set_backup_chunk_root_level(root_backup, btrfs_header_level(info->chunk_root->node)); - btrfs_set_backup_extent_root(root_backup, extent_root->node->start); - btrfs_set_backup_extent_root_gen(root_backup, - btrfs_header_generation(extent_root->node)); - btrfs_set_backup_extent_root_level(root_backup, - btrfs_header_level(extent_root->node)); + if (btrfs_fs_incompat(info, EXTENT_TREE_V2)) { + btrfs_set_backup_block_group_root(root_backup, + info->block_group_root->node->start); + btrfs_set_backup_block_group_root_gen(root_backup, + btrfs_header_generation(info->block_group_root->node)); + btrfs_set_backup_block_group_root_level(root_backup, + btrfs_header_level(info->block_group_root->node)); + } else { + struct btrfs_root *extent_root = btrfs_extent_root(info, 0); + + btrfs_set_backup_extent_root(root_backup, + extent_root->node->start); + btrfs_set_backup_extent_root_gen(root_backup, + btrfs_header_generation(extent_root->node)); + btrfs_set_backup_extent_root_level(root_backup, + btrfs_header_level(extent_root->node)); + } /* * we might commit during log recovery, which happens before we set @@ -2269,6 +2281,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) free_root_extent_buffers(info->uuid_root); free_root_extent_buffers(info->fs_root); free_root_extent_buffers(info->data_reloc_root); + free_root_extent_buffers(info->block_group_root); if (free_chunk_root) free_root_extent_buffers(info->chunk_root); } @@ -2964,8 +2977,20 @@ static int load_important_roots(struct btrfs_fs_info *fs_info) gen = btrfs_super_generation(sb); level = btrfs_super_root_level(sb); ret = load_super_root(fs_info->tree_root, bytenr, gen, level); - if (ret) + if (ret) { btrfs_warn(fs_info, "couldn't read tree root"); + return ret; + } + + if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) + return 0; + + bytenr = btrfs_super_block_group_root(sb); + gen = btrfs_super_block_group_root_generation(sb); + level = btrfs_super_block_group_root_level(sb); + ret = load_super_root(fs_info->block_group_root, bytenr, gen, level); + if (ret) + btrfs_warn(fs_info, "couldn't read block group root"); return ret; } @@ -2978,6 +3003,16 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) int ret = 0; int i; + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { + struct btrfs_root *root; + + root = btrfs_alloc_root(fs_info, BTRFS_BLOCK_GROUP_TREE_OBJECTID, + GFP_KERNEL); + if (!root) + return -ENOMEM; + fs_info->block_group_root = root; + } + for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { if (handle_error) { if (!IS_ERR(tree_root->node)) diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 5e8bef4b7563..2e10514ecda8 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -111,6 +111,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root) static inline struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info) { + if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) + return fs_info->block_group_root; return btrfs_extent_root(fs_info, 0); } diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 0775ae9f4419..524fdb0ddd74 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -23,6 +23,7 @@ static const struct root_name_map root_map[] = { { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, { BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, { 
BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, + { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" }, { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }, }; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 0d729664b4b4..f068ff30d654 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -53,6 +53,7 @@ struct btrfs_space_info; { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \ { BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \ { BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \ + { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },\ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) #define show_root_type(obj) \ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 5416f1f1a77a..b069752a8ecf 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -53,6 +53,9 @@ /* tracks free space in block groups. */ #define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL +/* Holds the block group items for extent tree v2. */ +#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL + /* device stats in the device tree */ #define BTRFS_DEV_STATS_OBJECTID 0ULL -- cgit v1.2.3-71-gd317 From dcb77a9ae87dc1ae2c54ea2e629da357e694b664 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 16 Aug 2021 15:58:29 -0700 Subject: btrfs: add definitions and documentation for encoded I/O ioctls In order to allow sending and receiving compressed data without decompressing it, we need an interface to write pre-compressed data directly to the filesystem and the matching interface to read compressed data without decompressing it. This adds the definitions for ioctls to do that and detailed explanations of how to use them. Reviewed-by: Nikolay Borisov Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- include/uapi/linux/btrfs.h | 132 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 1cb1a3860f1d..d956b2993970 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -869,6 +869,134 @@ struct btrfs_ioctl_get_subvol_rootref_args { __u8 align[7]; }; +/* + * Data and metadata for an encoded read or write. + * + * Encoded I/O bypasses any encoding automatically done by the filesystem (e.g., + * compression). This can be used to read the compressed contents of a file or + * write pre-compressed data directly to a file. + * + * BTRFS_IOC_ENCODED_READ and BTRFS_IOC_ENCODED_WRITE are essentially + * preadv/pwritev with additional metadata about how the data is encoded and the + * size of the unencoded data. + * + * BTRFS_IOC_ENCODED_READ fills the given iovecs with the encoded data, fills + * the metadata fields, and returns the size of the encoded data. It reads one + * extent per call. It can also read data which is not encoded. + * + * BTRFS_IOC_ENCODED_WRITE uses the metadata fields, writes the encoded data + * from the iovecs, and returns the size of the encoded data. Note that the + * encoded data is not validated when it is written; if it is not valid (e.g., + * it cannot be decompressed), then a subsequent read may return an error. + * + * Since the filesystem page cache contains decoded data, encoded I/O bypasses + * the page cache. Encoded I/O requires CAP_SYS_ADMIN. + */ +struct btrfs_ioctl_encoded_io_args { + /* Input parameters for both reads and writes. */ + + /* + * iovecs containing encoded data. 
+ * + * For reads, if the size of the encoded data is larger than the sum of + * iov[n].iov_len for 0 <= n < iovcnt, then the ioctl fails with + * ENOBUFS. + * + * For writes, the size of the encoded data is the sum of iov[n].iov_len + * for 0 <= n < iovcnt. This must be less than 128 KiB (this limit may + * increase in the future). This must also be less than or equal to + * unencoded_len. + */ + const struct iovec __user *iov; + /* Number of iovecs. */ + unsigned long iovcnt; + /* + * Offset in file. + * + * For writes, must be aligned to the sector size of the filesystem. + */ + __s64 offset; + /* Currently must be zero. */ + __u64 flags; + + /* + * For reads, the following members are output parameters that will + * contain the returned metadata for the encoded data. + * For writes, the following members must be set to the metadata for the + * encoded data. + */ + + /* + * Length of the data in the file. + * + * Must be less than or equal to unencoded_len - unencoded_offset. For + * writes, must be aligned to the sector size of the filesystem unless + * the data ends at or beyond the current end of the file. + */ + __u64 len; + /* + * Length of the unencoded (i.e., decrypted and decompressed) data. + * + * For writes, must be no more than 128 KiB (this limit may increase in + * the future). If the unencoded data is actually longer than + * unencoded_len, then it is truncated; if it is shorter, then it is + * extended with zeroes. + */ + __u64 unencoded_len; + /* + * Offset from the first byte of the unencoded data to the first byte of + * logical data in the file. + * + * Must be less than unencoded_len. + */ + __u64 unencoded_offset; + /* + * BTRFS_ENCODED_IO_COMPRESSION_* type. + * + * For writes, must not be BTRFS_ENCODED_IO_COMPRESSION_NONE. + */ + __u32 compression; + /* Currently always BTRFS_ENCODED_IO_ENCRYPTION_NONE. */ + __u32 encryption; + /* + * Reserved for future expansion. + * + * For reads, always returned as zero. Users should check for non-zero + * bytes. If there are any, then the kernel has a newer version of this + * structure with additional information that the user definition is + * missing. + * + * For writes, must be zeroed. + */ + __u8 reserved[64]; +}; + +/* Data is not compressed. */ +#define BTRFS_ENCODED_IO_COMPRESSION_NONE 0 +/* Data is compressed as a single zlib stream. */ +#define BTRFS_ENCODED_IO_COMPRESSION_ZLIB 1 +/* + * Data is compressed as a single zstd frame with the windowLog compression + * parameter set to no more than 17. + */ +#define BTRFS_ENCODED_IO_COMPRESSION_ZSTD 2 +/* + * Data is compressed sector by sector (using the sector size indicated by the + * name of the constant) with LZO1X and wrapped in the format documented in + * fs/btrfs/lzo.c. For writes, the compression sector size must match the + * filesystem sector size. + */ +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_4K 3 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_8K 4 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_16K 5 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_32K 6 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_64K 7 +#define BTRFS_ENCODED_IO_COMPRESSION_TYPES 8 + +/* Data is not encrypted. 
*/ +#define BTRFS_ENCODED_IO_ENCRYPTION_NONE 0 +#define BTRFS_ENCODED_IO_ENCRYPTION_TYPES 1 + /* Error codes as returned by the kernel */ enum btrfs_err_code { BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, @@ -997,5 +1125,9 @@ enum btrfs_err_code { struct btrfs_ioctl_ino_lookup_user_args) #define BTRFS_IOC_SNAP_DESTROY_V2 _IOW(BTRFS_IOCTL_MAGIC, 63, \ struct btrfs_ioctl_vol_args_v2) +#define BTRFS_IOC_ENCODED_READ _IOR(BTRFS_IOCTL_MAGIC, 64, \ + struct btrfs_ioctl_encoded_io_args) +#define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \ + struct btrfs_ioctl_encoded_io_args) #endif /* _UAPI_LINUX_BTRFS_H */ -- cgit v1.2.3-71-gd317 From 65722ff6181aa52c3d5b0929004af22a3a63e148 Mon Sep 17 00:00:00 2001 From: David Yat Sin Date: Tue, 8 Mar 2022 14:00:50 -0500 Subject: drm/amdkfd: CRIU export dmabuf handles for GTT BOs Export dmabuf handles for GTT BOs so that their contents can be accessed using SDMA during checkpoint/restore. v2: Squash in fix from David to set dmabuf handle to invalid for BOs that cannot be accessed using SDMA during checkpoint/restore. Signed-off-by: David Yat Sin Reviewed-by : Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 18 ++++++++++++++---- include/uapi/linux/kfd_ioctl.h | 5 ++++- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 2c7d76e67ddb..607f65ab39ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1759,14 +1759,18 @@ static int criu_checkpoint_bos(struct kfd_process *p, goto exit; } } - if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + if (bo_bucket->alloc_flags + & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { ret = criu_get_prime_handle(&dumper_bo->tbo.base, bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
DRM_RDWR : 0, &bo_bucket->dmabuf_fd); if (ret) goto exit; + } else { + bo_bucket->dmabuf_fd = KFD_INVALID_FD; } + if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); @@ -1812,7 +1816,8 @@ static int criu_checkpoint_bos(struct kfd_process *p, exit: while (ret && bo_index--) { - if (bo_buckets[bo_index].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) + if (bo_buckets[bo_index].alloc_flags + & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) close_fd(bo_buckets[bo_index].dmabuf_fd); } @@ -2211,12 +2216,16 @@ static int criu_restore_bo(struct kfd_process *p, pr_debug("map memory was successful for the BO\n"); /* create the dmabuf object and export the bo */ - if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + if (bo_bucket->alloc_flags + & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR, &bo_bucket->dmabuf_fd); if (ret) return ret; + } else { + bo_bucket->dmabuf_fd = KFD_INVALID_FD; } + return 0; } @@ -2281,7 +2290,8 @@ static int criu_restore_bos(struct kfd_process *p, exit: while (ret && i--) { - if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) + if (bo_buckets[i].alloc_flags + & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) close_fd(bo_buckets[i].dmabuf_fd); } kvfree(bo_buckets); diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index b40687bf1014..42975e940758 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -33,9 +33,10 @@ * - 1.5 - Add SVM API * - 1.6 - Query clear flags in SVM get_attr API * - 1.7 - Checkpoint Restore (CRIU) API + * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 7 +#define KFD_IOCTL_MINOR_VERSION 8 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ @@ -195,6 +196,8 @@ struct kfd_ioctl_dbg_wave_control_args { __u32 buf_size_in_bytes; /*including gpu_id and buf_size */ }; +#define KFD_INVALID_FD 0xffffffff + /* Matching HSA_EVENTTYPE */ #define KFD_IOC_EVENT_SIGNAL 0 #define KFD_IOC_EVENT_NODECHANGE 1 -- cgit v1.2.3-71-gd317 From 435fe1c0c1f74b682dba85641406abf4337aade6 Mon Sep 17 00:00:00 2001 From: Eyal Birger Date: Wed, 16 Mar 2022 08:15:57 +0200 Subject: net: geneve: support IPv4/IPv6 as inner protocol This patch adds support for encapsulating IPv4/IPv6 within GENEVE. In order to use this, a new IFLA_GENEVE_INNER_PROTO_INHERIT flag needs to be provided at device creation. This property cannot be changed for the time being. In case IP traffic is received on a non-tun device the drop count is increased. 
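To make the receive-path change easier to follow before reading the diff below, the sketch that follows paraphrases the new dispatch logic in condensed form; it is an illustrative simplification of the driver code in this patch (function name invented, error handling omitted), not the verbatim kernel source:

static void geneve_rx_dispatch_sketch(struct geneve_dev *geneve,
				      struct sk_buff *skb, __be16 inner_proto)
{
	if (inner_proto == htons(ETH_P_TEB)) {
		/* Classic GENEVE: the inner frame carries an Ethernet header. */
		skb_reset_mac_header(skb);
		skb->protocol = eth_type_trans(skb, geneve->dev);
	} else {
		/* IFLA_GENEVE_INNER_PROTO_INHERIT mode: the inner frame is raw
		 * IPv4/IPv6 with no MAC header, so hand it up as PACKET_HOST.
		 * Devices created without the flag drop such packets and bump
		 * rx_dropped instead.
		 */
		skb_reset_mac_header(skb);
		skb->dev = geneve->dev;
		skb->pkt_type = PACKET_HOST;
	}
}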
Signed-off-by: Eyal Birger Link: https://lore.kernel.org/r/20220316061557.431872-1-eyal.birger@gmail.com Signed-off-by: Paolo Abeni --- drivers/net/geneve.c | 82 ++++++++++++++++++++++++++++++++++---------- include/uapi/linux/if_link.h | 1 + 2 files changed, 64 insertions(+), 19 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index a895ff756093..8f30660224c5 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -56,6 +56,7 @@ struct geneve_config { bool use_udp6_rx_checksums; bool ttl_inherit; enum ifla_geneve_df df; + bool inner_proto_inherit; }; /* Pseudo network device */ @@ -251,17 +252,24 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, } } - skb_reset_mac_header(skb); - skb->protocol = eth_type_trans(skb, geneve->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - if (tun_dst) skb_dst_set(skb, &tun_dst->dst); - /* Ignore packet loops (and multicast echo) */ - if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { - geneve->dev->stats.rx_errors++; - goto drop; + if (gnvh->proto_type == htons(ETH_P_TEB)) { + skb_reset_mac_header(skb); + skb->protocol = eth_type_trans(skb, geneve->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + + /* Ignore packet loops (and multicast echo) */ + if (ether_addr_equal(eth_hdr(skb)->h_source, + geneve->dev->dev_addr)) { + geneve->dev->stats.rx_errors++; + goto drop; + } + } else { + skb_reset_mac_header(skb); + skb->dev = geneve->dev; + skb->pkt_type = PACKET_HOST; } oiph = skb_network_header(skb); @@ -345,6 +353,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) struct genevehdr *geneveh; struct geneve_dev *geneve; struct geneve_sock *gs; + __be16 inner_proto; int opts_len; /* Need UDP and Geneve header to be present */ @@ -356,7 +365,11 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (unlikely(geneveh->ver != GENEVE_VER)) goto drop; - if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) + inner_proto = geneveh->proto_type; + + if (unlikely((inner_proto != htons(ETH_P_TEB) && + inner_proto != htons(ETH_P_IP) && + inner_proto != htons(ETH_P_IPV6)))) goto drop; gs = rcu_dereference_sk_user_data(sk); @@ -367,9 +380,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!geneve) goto drop; + if (unlikely((!geneve->cfg.inner_proto_inherit && + inner_proto != htons(ETH_P_TEB)))) { + geneve->dev->stats.rx_dropped++; + goto drop; + } + opts_len = geneveh->opt_len * 4; - if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, - htons(ETH_P_TEB), + if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto, !net_eq(geneve->net, dev_net(geneve->dev)))) { geneve->dev->stats.rx_dropped++; goto drop; @@ -717,7 +735,8 @@ static int geneve_stop(struct net_device *dev) } static void geneve_build_header(struct genevehdr *geneveh, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 inner_proto) { geneveh->ver = GENEVE_VER; geneveh->opt_len = info->options_len / 4; @@ -725,7 +744,7 @@ static void geneve_build_header(struct genevehdr *geneveh, geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT); geneveh->rsvd1 = 0; tunnel_id_to_vni(info->key.tun_id, geneveh->vni); - geneveh->proto_type = htons(ETH_P_TEB); + geneveh->proto_type = inner_proto; geneveh->rsvd2 = 0; if (info->key.tun_flags & TUNNEL_GENEVE_OPT) @@ -734,10 +753,12 @@ static void geneve_build_header(struct genevehdr *geneveh, static int 
geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, const struct ip_tunnel_info *info, - bool xnet, int ip_hdr_len) + bool xnet, int ip_hdr_len, + bool inner_proto_inherit) { bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); struct genevehdr *gnvh; + __be16 inner_proto; int min_headroom; int err; @@ -755,8 +776,9 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, goto free_dst; gnvh = __skb_push(skb, sizeof(*gnvh) + info->options_len); - geneve_build_header(gnvh, info); - skb_set_inner_protocol(skb, htons(ETH_P_TEB)); + inner_proto = inner_proto_inherit ? skb->protocol : htons(ETH_P_TEB); + geneve_build_header(gnvh, info, inner_proto); + skb_set_inner_protocol(skb, inner_proto); return 0; free_dst: @@ -959,7 +981,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, } } - err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr)); + err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr), + geneve->cfg.inner_proto_inherit); if (unlikely(err)) return err; @@ -1038,7 +1061,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = key->ttl; ttl = ttl ? : ip6_dst_hoplimit(dst); } - err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr)); + err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr), + geneve->cfg.inner_proto_inherit); if (unlikely(err)) return err; @@ -1388,6 +1412,14 @@ static int geneve_configure(struct net *net, struct net_device *dev, dst_cache_reset(&geneve->cfg.info.dst_cache); memcpy(&geneve->cfg, cfg, sizeof(*cfg)); + if (geneve->cfg.inner_proto_inherit) { + dev->header_ops = NULL; + dev->type = ARPHRD_NONE; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->flags = IFF_NOARP; + } + err = register_netdevice(dev); if (err) return err; @@ -1561,10 +1593,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], #endif } + if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) { + if (changelink) { + attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT; + goto change_notsup; + } + cfg->inner_proto_inherit = true; + } + return 0; change_notsup: NL_SET_ERR_MSG_ATTR(extack, data[attrtype], - "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported"); + "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, and UDP checksum attributes are not supported"); return -EOPNOTSUPP; } @@ -1799,6 +1839,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit)) goto nla_put_failure; + if (geneve->cfg.inner_proto_inherit && + nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index bd24c7dc10a2..cc284c048e69 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -842,6 +842,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) -- cgit v1.2.3-71-gd317 From ec7328b59176227216c461601c6bd0e922232a9b Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Wed, 16 Mar 2022 16:08:43 +0100 Subject: net: bridge: mst: Multiple Spanning Tree (MST) mode Allow the user to switch from the current per-VLAN STP mode to an MST mode. Up to this point, per-VLAN STP states were always isolated from each other.
This is in contrast to the MSTP standard (802.1Q-2018, Clause 13.5), where VLANs are grouped into MST instances (MSTIs), and the state is managed on a per-MSTI level, rather than at the per-VLAN level. Perhaps due to the prevalence of the standard, many switching ASICs are built after the same model. Therefore, add a corresponding MST mode to the bridge, which we can later add offloading support for in a straightforward way. For now, all VLANs are fixed to MSTI 0, also called the Common Spanning Tree (CST). That is, all VLANs will follow the port-global state. Upcoming changes will make this actually useful by allowing VLANs to be mapped to arbitrary MSTIs and allow individual MSTI states to be changed. Signed-off-by: Tobias Waldekranz Acked-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/uapi/linux/if_bridge.h | 1 + net/bridge/Makefile | 2 +- net/bridge/br.c | 5 +++ net/bridge/br_input.c | 17 ++++++++- net/bridge/br_mst.c | 87 ++++++++++++++++++++++++++++++++++++++++++ net/bridge/br_private.h | 37 ++++++++++++++++++ net/bridge/br_stp.c | 6 +++ net/bridge/br_vlan.c | 20 +++++++++- net/bridge/br_vlan_options.c | 5 +++ 9 files changed, 176 insertions(+), 4 deletions(-) create mode 100644 net/bridge/br_mst.c (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 2711c3522010..30a242195ced 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -759,6 +759,7 @@ struct br_mcast_stats { enum br_boolopt_id { BR_BOOLOPT_NO_LL_LEARN, BR_BOOLOPT_MCAST_VLAN_SNOOPING, + BR_BOOLOPT_MST_ENABLE, BR_BOOLOPT_MAX }; diff --git a/net/bridge/Makefile b/net/bridge/Makefile index 7fb9a021873b..24bd1c0a9a5a 100644 --- a/net/bridge/Makefile +++ b/net/bridge/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o br_multicast_eht.o -bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o +bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o br_mst.o bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o diff --git a/net/bridge/br.c b/net/bridge/br.c index b1dea3febeea..96e91d69a9a8 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -265,6 +265,9 @@ int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on, case BR_BOOLOPT_MCAST_VLAN_SNOOPING: err = br_multicast_toggle_vlan_snooping(br, on, extack); break; + case BR_BOOLOPT_MST_ENABLE: + err = br_mst_set_enabled(br, on, extack); + break; default: /* shouldn't be called with unsupported options */ WARN_ON(1); @@ -281,6 +284,8 @@ int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt) return br_opt_get(br, BROPT_NO_LL_LEARN); case BR_BOOLOPT_MCAST_VLAN_SNOOPING: return br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED); + case BR_BOOLOPT_MST_ENABLE: + return br_opt_get(br, BROPT_MST_ENABLED); default: /* shouldn't be called with unsupported options */ WARN_ON(1); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index e0c13fcc50ed..196417859c4a 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -78,13 +78,22 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb u16 vid = 0; u8 state; - if (!p || p->state == BR_STATE_DISABLED) + if (!p) goto drop; br = p->br; + + if (br_mst_is_enabled(br)) { + state = BR_STATE_FORWARDING; + } else { + if (p->state == BR_STATE_DISABLED) + goto drop; + + state = p->state; + } + brmctx =
&p->br->multicast_ctx; pmctx = &p->multicast_ctx; - state = p->state; if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid, &state, &vlan)) goto out; @@ -370,9 +379,13 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; forward: + if (br_mst_is_enabled(p->br)) + goto defer_stp_filtering; + switch (p->state) { case BR_STATE_FORWARDING: case BR_STATE_LEARNING: +defer_stp_filtering: if (ether_addr_equal(p->br->dev->dev_addr, dest)) skb->pkt_type = PACKET_HOST; diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c new file mode 100644 index 000000000000..0f9f596f86bc --- /dev/null +++ b/net/bridge/br_mst.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Bridge Multiple Spanning Tree Support + * + * Authors: + * Tobias Waldekranz + */ + +#include + +#include "br_private.h" + +DEFINE_STATIC_KEY_FALSE(br_mst_used); + +static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v, + u8 state) +{ + struct net_bridge_vlan_group *vg = nbp_vlan_group(p); + + if (v->state == state) + return; + + br_vlan_set_state(v, state); + + if (v->vid == vg->pvid) + br_vlan_set_pvid_state(vg, state); +} + +int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, + struct netlink_ext_ack *extack) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + + vg = nbp_vlan_group(p); + if (!vg) + return 0; + + list_for_each_entry(v, &vg->vlan_list, vlist) { + if (v->brvlan->msti != msti) + continue; + + br_mst_vlan_set_state(p, v, state); + } + + return 0; +} + +void br_mst_vlan_init_state(struct net_bridge_vlan *v) +{ + /* VLANs always start out in MSTI 0 (CST) */ + v->msti = 0; + + if (br_vlan_is_master(v)) + v->state = BR_STATE_FORWARDING; + else + v->state = v->port->state; +} + +int br_mst_set_enabled(struct net_bridge *br, bool on, + struct netlink_ext_ack *extack) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_port *p; + + list_for_each_entry(p, &br->port_list, list) { + vg = nbp_vlan_group(p); + + if (!vg->num_vlans) + continue; + + NL_SET_ERR_MSG(extack, + "MST mode can't be changed while VLANs exist"); + return -EBUSY; + } + + if (br_opt_get(br, BROPT_MST_ENABLED) == on) + return 0; + + if (on) + static_branch_enable(&br_mst_used); + else + static_branch_disable(&br_mst_used); + + br_opt_toggle(br, BROPT_MST_ENABLED, on); + return 0; +} diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 48bc61ebc211..c2190c8841fb 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -178,6 +178,7 @@ enum { * @br_mcast_ctx: if MASTER flag set, this is the global vlan multicast context * @port_mcast_ctx: if MASTER flag unset, this is the per-port/vlan multicast * context + * @msti: if MASTER flag set, this holds the VLANs MST instance * @vlist: sorted list of VLAN entries * @rcu: used for entry destruction * @@ -210,6 +211,8 @@ struct net_bridge_vlan { struct net_bridge_mcast_port port_mcast_ctx; }; + u16 msti; + struct list_head vlist; struct rcu_head rcu; @@ -445,6 +448,7 @@ enum net_bridge_opts { BROPT_NO_LL_LEARN, BROPT_VLAN_BRIDGE_BINDING, BROPT_MCAST_VLAN_SNOOPING_ENABLED, + BROPT_MST_ENABLED, }; struct net_bridge { @@ -1765,6 +1769,39 @@ static inline bool br_vlan_state_allowed(u8 state, bool learn_allow) } #endif +/* br_mst.c */ +#ifdef CONFIG_BRIDGE_VLAN_FILTERING +DECLARE_STATIC_KEY_FALSE(br_mst_used); +static inline bool br_mst_is_enabled(struct net_bridge *br) +{ + return static_branch_unlikely(&br_mst_used) && + br_opt_get(br, BROPT_MST_ENABLED); +} 
+ +int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, + struct netlink_ext_ack *extack); +void br_mst_vlan_init_state(struct net_bridge_vlan *v); +int br_mst_set_enabled(struct net_bridge *br, bool on, + struct netlink_ext_ack *extack); +#else +static inline bool br_mst_is_enabled(struct net_bridge *br) +{ + return false; +} + +static inline int br_mst_set_state(struct net_bridge_port *p, u16 msti, + u8 state, struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + +static inline int br_mst_set_enabled(struct net_bridge *br, bool on, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} +#endif + struct nf_br_ops { int (*br_dev_xmit_hook)(struct sk_buff *skb); }; diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 1d80f34a139c..7d27b2e6038f 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -43,6 +43,12 @@ void br_set_state(struct net_bridge_port *p, unsigned int state) return; p->state = state; + if (br_opt_get(p->br, BROPT_MST_ENABLED)) { + err = br_mst_set_state(p, 0, state, NULL); + if (err) + br_warn(p->br, "error setting MST state on port %u(%s)\n", + p->port_no, netdev_name(p->dev)); + } err = switchdev_port_attr_set(p->dev, &attr, NULL); if (err && err != -EOPNOTSUPP) br_warn(p->br, "error setting offload STP state on port %u(%s)\n", diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 7557e90b60e1..0f5e75ccac79 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -226,6 +226,24 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu) kfree(v); } +static void br_vlan_init_state(struct net_bridge_vlan *v) +{ + struct net_bridge *br; + + if (br_vlan_is_master(v)) + br = v->br; + else + br = v->port->br; + + if (br_opt_get(br, BROPT_MST_ENABLED)) { + br_mst_vlan_init_state(v); + return; + } + + v->state = BR_STATE_FORWARDING; + v->msti = 0; +} + /* This is the shared VLAN add function which works for both ports and bridge * devices. There are four possible calls to this function in terms of the * vlan entry type: @@ -322,7 +340,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags, } /* set the state before publishing */ - v->state = BR_STATE_FORWARDING; + br_vlan_init_state(v); err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode, br_vlan_rht_params); diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c index a6382973b3e7..09112b56e79c 100644 --- a/net/bridge/br_vlan_options.c +++ b/net/bridge/br_vlan_options.c @@ -99,6 +99,11 @@ static int br_vlan_modify_state(struct net_bridge_vlan_group *vg, return -EBUSY; } + if (br_opt_get(br, BROPT_MST_ENABLED)) { + NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state directly when MST is enabled"); + return -EBUSY; + } + if (v->state == state) return 0; -- cgit v1.2.3-71-gd317 From 8c678d60562f3e5f6d0a5f5465e27930ffedb8ca Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Wed, 16 Mar 2022 16:08:44 +0100 Subject: net: bridge: mst: Allow changing a VLAN's MSTI Allow a VLAN to move out of the CST (MSTI 0), to an independent tree. The user manages the VID to MSTI mappings via a global VLAN setting. The proposed iproute2 interface would be: bridge vlan global set dev br0 vid <vid> msti <msti> Changing the state in non-zero MSTIs is still not supported, but will be addressed in upcoming changes.
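As a concrete usage illustration (hypothetical VIDs and MSTI numbers, assuming the proposed syntax above), grouping two VLANs into one spanning-tree instance would look like:

$ bridge vlan global set dev br0 vid 20 msti 100
$ bridge vlan global set dev br0 vid 30 msti 100

Per the synchronization logic in the diff that follows, a VLAN moved into an MSTI inherits the port state of any VLAN already in that MSTI, while a VLAN starting a brand-new MSTI comes up with all ports disabled.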
Signed-off-by: Tobias Waldekranz Acked-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/uapi/linux/if_bridge.h | 1 + net/bridge/br_mst.c | 42 ++++++++++++++++++++++++++++++++++++++++++ net/bridge/br_private.h | 1 + net/bridge/br_vlan_options.c | 15 +++++++++++++++ 4 files changed, 59 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 30a242195ced..f60244b747ae 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -564,6 +564,7 @@ enum { BRIDGE_VLANDB_GOPTS_MCAST_QUERIER, BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE, + BRIDGE_VLANDB_GOPTS_MSTI, __BRIDGE_VLANDB_GOPTS_MAX }; #define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1) diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 0f9f596f86bc..d7a7b5d7ddb3 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -46,6 +46,48 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, return 0; } +static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti) +{ + struct net_bridge_vlan_group *vg = nbp_vlan_group(pv->port); + struct net_bridge_vlan *v; + + list_for_each_entry(v, &vg->vlan_list, vlist) { + /* If this port already has a defined state in this + * MSTI (through some other VLAN membership), inherit + * it. + */ + if (v != pv && v->brvlan->msti == msti) { + br_mst_vlan_set_state(pv->port, pv, v->state); + return; + } + } + + /* Otherwise, start out in a new MSTI with all ports disabled. */ + return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED); +} + +int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *pv; + struct net_bridge_port *p; + + if (mv->msti == msti) + return 0; + + mv->msti = msti; + + list_for_each_entry(p, &mv->br->port_list, list) { + vg = nbp_vlan_group(p); + + pv = br_vlan_find(vg, mv->vid); + if (pv) + br_mst_vlan_sync_state(pv, msti); + } + + return 0; +} + void br_mst_vlan_init_state(struct net_bridge_vlan *v) { /* VLANs always start out in MSTI 0 (CST) */ diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index c2190c8841fb..3978e1d9ffb5 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -1780,6 +1780,7 @@ static inline bool br_mst_is_enabled(struct net_bridge *br) int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, struct netlink_ext_ack *extack); +int br_mst_vlan_set_msti(struct net_bridge_vlan *v, u16 msti); void br_mst_vlan_init_state(struct net_bridge_vlan *v); int br_mst_set_enabled(struct net_bridge *br, bool on, struct netlink_ext_ack *extack); diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c index 09112b56e79c..a2724d03278c 100644 --- a/net/bridge/br_vlan_options.c +++ b/net/bridge/br_vlan_options.c @@ -296,6 +296,7 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr, const struct net_bridge_vlan *r_end) { return v_curr->vid - r_end->vid == 1 && + v_curr->msti == r_end->msti && ((v_curr->priv_flags ^ r_end->priv_flags) & BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 && br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx, @@ -384,6 +385,9 @@ bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range, #endif #endif + if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_MSTI, v_opts->msti)) + goto out_err; + nla_nest_end(skb, nest); return true; @@ -415,6 +419,7 @@ static size_t rtnl_vlan_global_opts_nlmsg_size(const struct 
net_bridge_vlan *v) + nla_total_size(0) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */ + br_rports_size(&v->br_mcast_ctx) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */ #endif + + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_MSTI */ + nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */ } @@ -564,6 +569,15 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br, } #endif #endif + if (tb[BRIDGE_VLANDB_GOPTS_MSTI]) { + u16 msti; + + msti = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_MSTI]); + err = br_mst_vlan_set_msti(v, msti); + if (err) + return err; + *changed = true; + } return 0; } @@ -583,6 +597,7 @@ static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = { [BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 }, + [BRIDGE_VLANDB_GOPTS_MSTI] = NLA_POLICY_MAX(NLA_U16, VLAN_N_VID - 1), }; int br_vlan_rtm_process_global_options(struct net_device *dev, -- cgit v1.2.3-71-gd317 From 122c29486e1ff78033c45d0d31c710e7dc8945a5 Mon Sep 17 00:00:00 2001 From: Tobias Waldekranz Date: Wed, 16 Mar 2022 16:08:45 +0100 Subject: net: bridge: mst: Support setting and reporting MST port states Make it possible to change the port state in a given MSTI by extending the bridge port netlink interface (RTM_SETLINK on PF_BRIDGE). The proposed iproute2 interface would be: bridge mst set dev <port> msti <msti> state <state> Current states in all applicable MSTIs can also be dumped via a corresponding RTM_GETLINK. The proposed iproute2 interface looks like this: $ bridge mst port msti vb1 0 state forwarding 100 state disabled vb2 0 state forwarding 100 state forwarding The preexisting per-VLAN states are still valid in the MST mode (although they are read-only), and can be queried as usual if one is interested in knowing a particular VLAN's state without having to care about the VID to MSTI mapping (in this example VLANs 20 and 30 are bound to MSTI 100): $ bridge -d vlan port vlan-id vb1 10 state forwarding mcast_router 1 20 state disabled mcast_router 1 30 state disabled mcast_router 1 40 state forwarding mcast_router 1 vb2 10 state forwarding mcast_router 1 20 state forwarding mcast_router 1 30 state forwarding mcast_router 1 40 state forwarding mcast_router 1 Signed-off-by: Tobias Waldekranz Acked-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/uapi/linux/if_bridge.h | 16 ++++++ include/uapi/linux/rtnetlink.h | 1 + net/bridge/br_mst.c | 126 +++++++++++++++++++++++++++++++++++++++++ net/bridge/br_netlink.c | 44 +++++++++++++- net/bridge/br_private.h | 23 ++++++++ 5 files changed, 209 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index f60244b747ae..a86a7e7b811f 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -122,6 +122,7 @@ enum { IFLA_BRIDGE_VLAN_TUNNEL_INFO, IFLA_BRIDGE_MRP, IFLA_BRIDGE_CFM, + IFLA_BRIDGE_MST, __IFLA_BRIDGE_MAX, }; #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1) @@ -453,6 +454,21 @@ enum { #define IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX (__IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX - 1) +enum { + IFLA_BRIDGE_MST_UNSPEC, + IFLA_BRIDGE_MST_ENTRY, + __IFLA_BRIDGE_MST_MAX, +}; +#define IFLA_BRIDGE_MST_MAX (__IFLA_BRIDGE_MST_MAX - 1) + +enum { + IFLA_BRIDGE_MST_ENTRY_UNSPEC, + IFLA_BRIDGE_MST_ENTRY_MSTI, + IFLA_BRIDGE_MST_ENTRY_STATE, + __IFLA_BRIDGE_MST_ENTRY_MAX, +}; +#define
IFLA_BRIDGE_MST_ENTRY_MAX (__IFLA_BRIDGE_MST_ENTRY_MAX - 1) + struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 51530aade46e..83849a37db5b 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -817,6 +817,7 @@ enum { #define RTEXT_FILTER_MRP (1 << 4) #define RTEXT_FILTER_CFM_CONFIG (1 << 5) #define RTEXT_FILTER_CFM_STATUS (1 << 6) +#define RTEXT_FILTER_MST (1 << 7) /* End of information exported to user level */ diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index d7a7b5d7ddb3..5c1831c73fc2 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -127,3 +127,129 @@ int br_mst_set_enabled(struct net_bridge *br, bool on, br_opt_toggle(br, BROPT_MST_ENABLED, on); return 0; } + +size_t br_mst_info_size(const struct net_bridge_vlan_group *vg) +{ + DECLARE_BITMAP(seen, VLAN_N_VID) = { 0 }; + const struct net_bridge_vlan *v; + size_t sz; + + /* IFLA_BRIDGE_MST */ + sz = nla_total_size(0); + + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { + if (test_bit(v->brvlan->msti, seen)) + continue; + + /* IFLA_BRIDGE_MST_ENTRY */ + sz += nla_total_size(0) + + /* IFLA_BRIDGE_MST_ENTRY_MSTI */ + nla_total_size(sizeof(u16)) + + /* IFLA_BRIDGE_MST_ENTRY_STATE */ + nla_total_size(sizeof(u8)); + + __set_bit(v->brvlan->msti, seen); + } + + return sz; +} + +int br_mst_fill_info(struct sk_buff *skb, + const struct net_bridge_vlan_group *vg) +{ + DECLARE_BITMAP(seen, VLAN_N_VID) = { 0 }; + const struct net_bridge_vlan *v; + struct nlattr *nest; + int err = 0; + + list_for_each_entry(v, &vg->vlan_list, vlist) { + if (test_bit(v->brvlan->msti, seen)) + continue; + + nest = nla_nest_start_noflag(skb, IFLA_BRIDGE_MST_ENTRY); + if (!nest || + nla_put_u16(skb, IFLA_BRIDGE_MST_ENTRY_MSTI, v->brvlan->msti) || + nla_put_u8(skb, IFLA_BRIDGE_MST_ENTRY_STATE, v->state)) { + err = -EMSGSIZE; + break; + } + nla_nest_end(skb, nest); + + __set_bit(v->brvlan->msti, seen); + } + + return err; +} + +static const struct nla_policy br_mst_nl_policy[IFLA_BRIDGE_MST_ENTRY_MAX + 1] = { + [IFLA_BRIDGE_MST_ENTRY_MSTI] = NLA_POLICY_RANGE(NLA_U16, + 1, /* 0 reserved for CST */ + VLAN_N_VID - 1), + [IFLA_BRIDGE_MST_ENTRY_STATE] = NLA_POLICY_RANGE(NLA_U8, + BR_STATE_DISABLED, + BR_STATE_BLOCKING), +}; + +static int br_mst_process_one(struct net_bridge_port *p, + const struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_BRIDGE_MST_ENTRY_MAX + 1]; + u16 msti; + u8 state; + int err; + + err = nla_parse_nested(tb, IFLA_BRIDGE_MST_ENTRY_MAX, attr, + br_mst_nl_policy, extack); + if (err) + return err; + + if (!tb[IFLA_BRIDGE_MST_ENTRY_MSTI]) { + NL_SET_ERR_MSG_MOD(extack, "MSTI not specified"); + return -EINVAL; + } + + if (!tb[IFLA_BRIDGE_MST_ENTRY_STATE]) { + NL_SET_ERR_MSG_MOD(extack, "State not specified"); + return -EINVAL; + } + + msti = nla_get_u16(tb[IFLA_BRIDGE_MST_ENTRY_MSTI]); + state = nla_get_u8(tb[IFLA_BRIDGE_MST_ENTRY_STATE]); + + return br_mst_set_state(p, msti, state, extack); +} + +int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr, + struct netlink_ext_ack *extack) +{ + struct nlattr *attr; + int err, msts = 0; + int rem; + + if (!br_opt_get(p->br, BROPT_MST_ENABLED)) { + NL_SET_ERR_MSG_MOD(extack, "Can't modify MST state when MST is disabled"); + return -EBUSY; + } + + nla_for_each_nested(attr, mst_attr, rem) { + switch (nla_type(attr)) { + case IFLA_BRIDGE_MST_ENTRY: + err = br_mst_process_one(p, attr, extack); + break; + 
default: + continue; + } + + msts++; + if (err) + break; + } + + if (!msts) { + NL_SET_ERR_MSG_MOD(extack, "Found no MST entries to process"); + err = -EINVAL; + } + + return err; +} diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 7d4432ca9a20..a8d90fa8621e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -119,6 +119,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev, /* Each VLAN is returned in bridge_vlan_info along with flags */ vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info)); + if (filter_mask & RTEXT_FILTER_MST) + vinfo_sz += br_mst_info_size(vg); + if (!(filter_mask & RTEXT_FILTER_CFM_STATUS)) return vinfo_sz; @@ -485,7 +488,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, RTEXT_FILTER_BRVLAN_COMPRESSED | RTEXT_FILTER_MRP | RTEXT_FILTER_CFM_CONFIG | - RTEXT_FILTER_CFM_STATUS)) { + RTEXT_FILTER_CFM_STATUS | + RTEXT_FILTER_MST)) { af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); if (!af) goto nla_put_failure; @@ -564,7 +568,28 @@ static int br_fill_ifinfo(struct sk_buff *skb, nla_nest_end(skb, cfm_nest); } + if ((filter_mask & RTEXT_FILTER_MST) && + br_opt_get(br, BROPT_MST_ENABLED) && port) { + const struct net_bridge_vlan_group *vg = nbp_vlan_group(port); + struct nlattr *mst_nest; + int err; + + if (!vg || !vg->num_vlans) + goto done; + + mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST); + if (!mst_nest) + goto nla_put_failure; + + err = br_mst_fill_info(skb, vg); + if (err) + goto nla_put_failure; + + nla_nest_end(skb, mst_nest); + } + done: + if (af) nla_nest_end(skb, af); nlmsg_end(skb, nlh); @@ -803,6 +828,23 @@ static int br_afspec(struct net_bridge *br, if (err) return err; break; + case IFLA_BRIDGE_MST: + if (!p) { + NL_SET_ERR_MSG(extack, + "MST states can only be set on bridge ports"); + return -EINVAL; + } + + if (cmd != RTM_SETLINK) { + NL_SET_ERR_MSG(extack, + "MST states can only be set through RTM_SETLINK"); + return -EINVAL; + } + + err = br_mst_process(p, attr, extack); + if (err) + return err; + break; } } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 3978e1d9ffb5..18ccc3d5d296 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -1784,6 +1784,11 @@ int br_mst_vlan_set_msti(struct net_bridge_vlan *v, u16 msti); void br_mst_vlan_init_state(struct net_bridge_vlan *v); int br_mst_set_enabled(struct net_bridge *br, bool on, struct netlink_ext_ack *extack); +size_t br_mst_info_size(const struct net_bridge_vlan_group *vg); +int br_mst_fill_info(struct sk_buff *skb, + const struct net_bridge_vlan_group *vg); +int br_mst_process(struct net_bridge_port *p, const struct nlattr *mst_attr, + struct netlink_ext_ack *extack); #else static inline bool br_mst_is_enabled(struct net_bridge *br) { @@ -1801,6 +1806,24 @@ static inline int br_mst_set_enabled(struct net_bridge *br, bool on, { return -EOPNOTSUPP; } + +static inline size_t br_mst_info_size(const struct net_bridge_vlan_group *vg) +{ + return 0; +} + +static inline int br_mst_fill_info(struct sk_buff *skb, + const struct net_bridge_vlan_group *vg) +{ + return -EOPNOTSUPP; +} + +static inline int br_mst_process(struct net_bridge_port *p, + const struct nlattr *mst_attr, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} #endif struct nf_br_ops { -- cgit v1.2.3-71-gd317 From 0dcac272540613d41c05e89679e4ddb978b612f1 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:09 +0100 Subject: bpf: Add multi kprobe link Adding new link type BPF_LINK_TYPE_KPROBE_MULTI that 
attaches a kprobe program through the fprobe API. The fprobe API allows attaching a probe to multiple functions at once very quickly, because it works on top of ftrace. On the other hand, this limits the probe point to the function entry or return. The kprobe program gets the same pt_regs input ctx as when it's attached through the perf API. Adding a new attach type, BPF_TRACE_KPROBE_MULTI, that allows attaching a kprobe program to multiple functions with the new link. The user provides an array of addresses or symbols, together with a count, to attach the kprobe program to. The new link_create uapi interface looks like: struct { __u32 flags; __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; } kprobe_multi; The flags field accepts a single bit, BPF_F_KPROBE_MULTI_RETURN, which turns the link into a return (kretprobe-style) multi kprobe. Signed-off-by: Masami Hiramatsu Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220316122419.933957-4-jolsa@kernel.org --- include/linux/bpf_types.h | 1 + include/linux/trace_events.h | 7 ++ include/uapi/linux/bpf.h | 13 +++ kernel/bpf/syscall.c | 26 ++++- kernel/trace/bpf_trace.c | 211 +++++++++++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 13 +++ 6 files changed, 266 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 48a91c51c015..3e24ad0c4b3c 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -140,3 +140,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) #ifdef CONFIG_PERF_EVENTS BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif +BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index dcea51fb60e2..8f0e9e7cb493 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -15,6 +15,7 @@ struct array_buffer; struct tracer; struct dentry; struct bpf_prog; +union bpf_attr; const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long flags, @@ -738,6 +739,7 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -779,6 +781,11 @@ static inline int bpf_get_perf_event_info(const struct perf_event *event, { return -EOPNOTSUPP; } +static inline int +bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 99fab54ae9c0..d77f47af7752 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -997,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1011,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1118,6 +1120,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
+ */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1475,6 +1482,12 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + } kprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9beb585be5a6..b8bb67ee6c57 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -32,6 +32,7 @@ #include #include #include +#include #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ @@ -3022,6 +3023,11 @@ out_put_file: fput(perf_file); return err; } +#else +static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_PERF_EVENTS */ #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd @@ -4255,7 +4261,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len +#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.addrs static int link_create(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type ptype; @@ -4279,7 +4285,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = tracing_bpf_link_attach(attr, uattr, prog); goto out; case BPF_PROG_TYPE_PERF_EVENT: - case BPF_PROG_TYPE_KPROBE: case BPF_PROG_TYPE_TRACEPOINT: if (attr->link_create.attach_type != BPF_PERF_EVENT) { ret = -EINVAL; @@ -4287,6 +4292,14 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) } ptype = prog->type; break; + case BPF_PROG_TYPE_KPROBE: + if (attr->link_create.attach_type != BPF_PERF_EVENT && + attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) { + ret = -EINVAL; + goto out; + } + ptype = prog->type; + break; default: ptype = attach_type_to_prog_type(attr->link_create.attach_type); if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { @@ -4318,13 +4331,16 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = bpf_xdp_link_attach(attr, prog); break; #endif -#ifdef CONFIG_PERF_EVENTS case BPF_PROG_TYPE_PERF_EVENT: case BPF_PROG_TYPE_TRACEPOINT: - case BPF_PROG_TYPE_KPROBE: ret = bpf_perf_link_attach(attr, prog); break; -#endif + case BPF_PROG_TYPE_KPROBE: + if (attr->link_create.attach_type == BPF_PERF_EVENT) + ret = bpf_perf_link_attach(attr, prog); + else + ret = bpf_kprobe_multi_link_attach(attr, prog); + break; default: ret = -EINVAL; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a2024ba32a20..fffa2171fae4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -2181,3 +2182,213 @@ static int __init bpf_event_init(void) fs_initcall(bpf_event_init); #endif /* CONFIG_MODULES */ + +#ifdef CONFIG_FPROBE +struct bpf_kprobe_multi_link { + struct bpf_link link; + struct fprobe fp; + unsigned long *addrs; +}; + +static void bpf_kprobe_multi_link_release(struct bpf_link *link) +{ + struct bpf_kprobe_multi_link *kmulti_link; + + kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); + unregister_fprobe(&kmulti_link->fp); +} + +static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) +{ + struct bpf_kprobe_multi_link *kmulti_link; + + kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); + kvfree(kmulti_link->addrs); + kfree(kmulti_link); +} + +static 
const struct bpf_link_ops bpf_kprobe_multi_link_lops = { + .release = bpf_kprobe_multi_link_release, + .dealloc = bpf_kprobe_multi_link_dealloc, +}; + +static int +kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, + struct pt_regs *regs) +{ + int err; + + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { + err = 0; + goto out; + } + + migrate_disable(); + rcu_read_lock(); + err = bpf_prog_run(link->link.prog, regs); + rcu_read_unlock(); + migrate_enable(); + + out: + __this_cpu_dec(bpf_prog_active); + return err; +} + +static void +kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, + struct pt_regs *regs) +{ + unsigned long saved_ip = instruction_pointer(regs); + struct bpf_kprobe_multi_link *link; + + /* + * Because fprobe's regs->ip is set to the next instruction of + * dynamic-ftrace instruction, correct entry ip must be set, so + * that the bpf program can access entry address via regs as same + * as kprobes. + * + * Both kprobe and kretprobe see the entry ip of traced function + * as instruction pointer. + */ + instruction_pointer_set(regs, entry_ip); + + link = container_of(fp, struct bpf_kprobe_multi_link, fp); + kprobe_multi_link_prog_run(link, regs); + + instruction_pointer_set(regs, saved_ip); +} + +static int +kprobe_multi_resolve_syms(const void *usyms, u32 cnt, + unsigned long *addrs) +{ + unsigned long addr, size; + const char **syms; + int err = -ENOMEM; + unsigned int i; + char *func; + + size = cnt * sizeof(*syms); + syms = kvzalloc(size, GFP_KERNEL); + if (!syms) + return -ENOMEM; + + func = kmalloc(KSYM_NAME_LEN, GFP_KERNEL); + if (!func) + goto error; + + if (copy_from_user(syms, usyms, size)) { + err = -EFAULT; + goto error; + } + + for (i = 0; i < cnt; i++) { + err = strncpy_from_user(func, syms[i], KSYM_NAME_LEN); + if (err == KSYM_NAME_LEN) + err = -E2BIG; + if (err < 0) + goto error; + err = -EINVAL; + addr = kallsyms_lookup_name(func); + if (!addr) + goto error; + if (!kallsyms_lookup_size_offset(addr, &size, NULL)) + goto error; + addr = ftrace_location_range(addr, addr + size - 1); + if (!addr) + goto error; + addrs[i] = addr; + } + + err = 0; +error: + kvfree(syms); + kfree(func); + return err; +} + +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_kprobe_multi_link *link = NULL; + struct bpf_link_primer link_primer; + unsigned long *addrs; + u32 flags, cnt, size; + void __user *uaddrs; + void __user *usyms; + int err; + + /* no support for 32bit archs yet */ + if (sizeof(u64) != sizeof(void *)) + return -EOPNOTSUPP; + + if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) + return -EINVAL; + + flags = attr->link_create.kprobe_multi.flags; + if (flags & ~BPF_F_KPROBE_MULTI_RETURN) + return -EINVAL; + + uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); + usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); + if (!!uaddrs == !!usyms) + return -EINVAL; + + cnt = attr->link_create.kprobe_multi.cnt; + if (!cnt) + return -EINVAL; + + size = cnt * sizeof(*addrs); + addrs = kvmalloc(size, GFP_KERNEL); + if (!addrs) + return -ENOMEM; + + if (uaddrs) { + if (copy_from_user(addrs, uaddrs, size)) { + err = -EFAULT; + goto error; + } + } else { + err = kprobe_multi_resolve_syms(usyms, cnt, addrs); + if (err) + goto error; + } + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + err = -ENOMEM; + goto error; + } + + bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, + &bpf_kprobe_multi_link_lops, prog); + + err = bpf_link_prime(&link->link, 
&link_primer); + if (err) + goto error; + + if (flags & BPF_F_KPROBE_MULTI_RETURN) + link->fp.exit_handler = kprobe_multi_link_handler; + else + link->fp.entry_handler = kprobe_multi_link_handler; + + link->addrs = addrs; + + err = register_fprobe_ips(&link->fp, addrs, cnt); + if (err) { + bpf_link_cleanup(&link_primer); + return err; + } + + return bpf_link_settle(&link_primer); + +error: + kfree(link); + kvfree(addrs); + return err; +} +#else /* !CONFIG_FPROBE */ +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 99fab54ae9c0..d77f47af7752 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -997,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1011,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1118,6 +1120,11 @@ enum bpf_link_type { */ #define BPF_F_XDP_HAS_FRAGS (1U << 5) +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1475,6 +1482,12 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + } kprobe_multi; }; } link_create; -- cgit v1.2.3-71-gd317 From ca74823c6e16dd42b7cf60d9fdde80e2a81a67bb Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:12 +0100 Subject: bpf: Add cookie support to programs attached with kprobe multi link Adding support to call the bpf_get_attach_cookie helper from kprobe programs attached with the kprobe multi link. The cookies are provided as an array of u64 values, where each value is paired with the provided function address or symbol at the same array index. When a cookie array is provided, it is sorted together with the addresses (check bpf_kprobe_multi_cookie_swap), so that the cookie can be found based on the address in the bpf_get_attach_cookie helper.
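For illustration, the sketch below shows how cookies pair with symbols from user space. It assumes the libbpf convenience API added alongside this series (bpf_program__attach_kprobe_multi_opts); the program name, the chosen symbols, and the cookie values are hypothetical:

/* BPF side: one program handles every attached function. */
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe.multi")
int handler(struct pt_regs *ctx)
{
	/* Returns the u64 paired with whichever address fired. */
	__u64 cookie = bpf_get_attach_cookie(ctx);

	bpf_printk("hit, cookie %llu", cookie);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

/* User-space side: attach to two symbols, each with its own cookie. */
#include <bpf/libbpf.h>

static struct bpf_link *attach_with_cookies(struct bpf_program *prog)
{
	const char *syms[] = { "ksys_read", "ksys_write" };
	const __u64 cookies[] = { 1, 2 };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cookies = cookies,
		.cnt = 2,
	);

	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}

A program attached this way then sees bpf_get_attach_cookie() return 1 when ksys_read() is hit and 2 for ksys_write(), regardless of how the kernel sorted the address array internally.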
Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-7-jolsa@kernel.org --- include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 2 +- kernel/trace/bpf_trace.c | 114 ++++++++++++++++++++++++++++++++++++++++- tools/include/uapi/linux/bpf.h | 1 + 4 files changed, 116 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d77f47af7752..7604e7d5438f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1487,6 +1487,7 @@ union bpf_attr { __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; + __aligned_u64 cookies; } kprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b8bb67ee6c57..cdaa1152436a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4261,7 +4261,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.addrs +#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies static int link_create(union bpf_attr *attr, bpfptr_t uattr) { enum bpf_prog_type ptype; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0e7f8c9bc756..9a7b6be655e4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include @@ -78,6 +80,7 @@ u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags, const struct btf **btf, s32 *btf_id); +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip); /** * trace_call_bpf - invoke BPF program @@ -1050,6 +1053,18 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) +{ + return bpf_kprobe_multi_cookie(current->bpf_ctx, instruction_pointer(regs)); +} + +static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { + .func = bpf_get_attach_cookie_kprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; @@ -1297,7 +1312,9 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) &bpf_get_func_ip_proto_kprobe_multi : &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: - return &bpf_get_attach_cookie_proto_trace; + return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? + &bpf_get_attach_cookie_proto_kmulti : + &bpf_get_attach_cookie_proto_trace; default: return bpf_tracing_func_proto(func_id, prog); } @@ -2203,6 +2220,13 @@ struct bpf_kprobe_multi_link { struct bpf_link link; struct fprobe fp; unsigned long *addrs; + /* + * The run_ctx here is used to get struct bpf_kprobe_multi_link in + * get_attach_cookie helper, so it can't be used to store data. 
+ */ + struct bpf_run_ctx run_ctx; + u64 *cookies; + u32 cnt; }; static void bpf_kprobe_multi_link_release(struct bpf_link *link) @@ -2219,6 +2243,7 @@ static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); kvfree(kmulti_link->addrs); + kvfree(kmulti_link->cookies); kfree(kmulti_link); } @@ -2227,10 +2252,60 @@ static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { .dealloc = bpf_kprobe_multi_link_dealloc, }; +static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) +{ + const struct bpf_kprobe_multi_link *link = priv; + unsigned long *addr_a = a, *addr_b = b; + u64 *cookie_a, *cookie_b; + unsigned long tmp1; + u64 tmp2; + + cookie_a = link->cookies + (addr_a - link->addrs); + cookie_b = link->cookies + (addr_b - link->addrs); + + /* swap addr_a/addr_b and cookie_a/cookie_b values */ + tmp1 = *addr_a; *addr_a = *addr_b; *addr_b = tmp1; + tmp2 = *cookie_a; *cookie_a = *cookie_b; *cookie_b = tmp2; +} + +static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b) +{ + const unsigned long *addr_a = a, *addr_b = b; + + if (*addr_a == *addr_b) + return 0; + return *addr_a < *addr_b ? -1 : 1; +} + +static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) +{ + return __bpf_kprobe_multi_cookie_cmp(a, b); +} + +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) +{ + struct bpf_kprobe_multi_link *link; + unsigned long *addr; + u64 *cookie; + + if (WARN_ON_ONCE(!ctx)) + return 0; + link = container_of(ctx, struct bpf_kprobe_multi_link, run_ctx); + if (!link->cookies) + return 0; + addr = bsearch(&ip, link->addrs, link->cnt, sizeof(ip), + __bpf_kprobe_multi_cookie_cmp); + if (!addr) + return 0; + cookie = link->cookies + (addr - link->addrs); + return *cookie; +} + static int kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, struct pt_regs *regs) { + struct bpf_run_ctx *old_run_ctx; int err; if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { @@ -2240,7 +2315,9 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, migrate_disable(); rcu_read_lock(); + old_run_ctx = bpf_set_run_ctx(&link->run_ctx); err = bpf_prog_run(link->link.prog, regs); + bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); @@ -2326,9 +2403,11 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr { struct bpf_kprobe_multi_link *link = NULL; struct bpf_link_primer link_primer; + void __user *ucookies; unsigned long *addrs; u32 flags, cnt, size; void __user *uaddrs; + u64 *cookies = NULL; void __user *usyms; int err; @@ -2368,6 +2447,19 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr goto error; } + ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); + if (ucookies) { + cookies = kvmalloc(size, GFP_KERNEL); + if (!cookies) { + err = -ENOMEM; + goto error; + } + if (copy_from_user(cookies, ucookies, size)) { + err = -EFAULT; + goto error; + } + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { err = -ENOMEM; @@ -2387,6 +2479,21 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr link->fp.entry_handler = kprobe_multi_link_handler; link->addrs = addrs; + link->cookies = cookies; + link->cnt = cnt; + + if (cookies) { + /* + * Sorting addresses will trigger sorting cookies as well + * (check bpf_kprobe_multi_cookie_swap). 
This way we can + * find cookie based on the address in bpf_get_attach_cookie + * helper. + */ + sort_r(addrs, cnt, sizeof(*addrs), + bpf_kprobe_multi_cookie_cmp, + bpf_kprobe_multi_cookie_swap, + link); + } err = register_fprobe_ips(&link->fp, addrs, cnt); if (err) { @@ -2399,6 +2506,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr error: kfree(link); kvfree(addrs); + kvfree(cookies); return err; } #else /* !CONFIG_FPROBE */ @@ -2406,4 +2514,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr { return -EOPNOTSUPP; } +static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx, u64 ip) +{ + return 0; +} #endif diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d77f47af7752..7604e7d5438f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1487,6 +1487,7 @@ union bpf_attr { __u32 cnt; __aligned_u64 syms; __aligned_u64 addrs; + __aligned_u64 cookies; } kprobe_multi; }; } link_create; -- cgit v1.2.3-71-gd317 From 54f586a9153201c6cff55e1f561990c78bd99aa7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 16 Mar 2022 21:27:51 +0100 Subject: rfkill: make new event layout opt-in Again new complaints surfaced that we had broken the ABI here, although previously all the userspace tools had agreed that it was their mistake and fixed it. Yet now there are cases (e.g. RHEL) that want to run old userspace with newer kernels, and thus are broken. Since this is a bit of a whack-a-mole thing, change the whole extensibility scheme of rfkill to no longer just rely on the message lengths, but instead require userspace to opt in via a new ioctl to a given maximum event size that it is willing to understand. By default, set that to RFKILL_EVENT_SIZE_V1 (8), so that the behaviour for userspace not calling the ioctl will look as if it's just running on an older kernel. Fixes: 14486c82612a ("rfkill: add a reason to the HW rfkill state") Cc: stable@vger.kernel.org # 5.11+ Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220316212749.16491491b270.Ifcb1950998330a596f29a2a162e00b7546a1d6d0@changeid --- include/uapi/linux/rfkill.h | 14 +++++++++++-- net/rfkill/core.c | 48 ++++++++++++++++++++++++++++++++------------- 2 files changed, 46 insertions(+), 16 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 9b77cfc42efa..283c5a7b3f2c 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -159,8 +159,16 @@ struct rfkill_event_ext { * old behaviour for all userspace, unless it explicitly opts in to the * rules outlined here by using the new &struct rfkill_event_ext. * - * Userspace using &struct rfkill_event_ext must adhere to the following - * rules + * Additionally, some other userspace (bluez, g-s-d) was reading with a + * large size but as streaming reads rather than message-based, or with + * too strict checks for the returned size. So eventually, we completely + * reverted this, and extended messages need to be opted in to by using + * an ioctl: + * + * ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext)); + * + * Userspace using &struct rfkill_event_ext and the ioctl must adhere to + * the following rules: * * 1. 
accept short writes, optionally using them to detect that it's * running on an older kernel; @@ -175,6 +183,8 @@ struct rfkill_event_ext { #define RFKILL_IOC_MAGIC 'R' #define RFKILL_IOC_NOINPUT 1 #define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT) +#define RFKILL_IOC_MAX_SIZE 2 +#define RFKILL_IOCTL_MAX_SIZE _IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32) /* and that's all userspace gets */ diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 5b1927d66f0d..dac4fdc7488a 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -78,6 +78,7 @@ struct rfkill_data { struct mutex mtx; wait_queue_head_t read_wait; bool input_handler; + u8 max_size; }; @@ -1153,6 +1154,8 @@ static int rfkill_fop_open(struct inode *inode, struct file *file) if (!data) return -ENOMEM; + data->max_size = RFKILL_EVENT_SIZE_V1; + INIT_LIST_HEAD(&data->events); mutex_init(&data->mtx); init_waitqueue_head(&data->read_wait); @@ -1235,6 +1238,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, list); sz = min_t(unsigned long, sizeof(ev->ev), count); + sz = min_t(unsigned long, sz, data->max_size); ret = sz; if (copy_to_user(buf, &ev->ev, sz)) ret = -EFAULT; @@ -1249,6 +1253,7 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { + struct rfkill_data *data = file->private_data; struct rfkill *rfkill; struct rfkill_event_ext ev; int ret; @@ -1263,6 +1268,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf, * our API version even in a write() call, if it cares. */ count = min(count, sizeof(ev)); + count = min_t(size_t, count, data->max_size); if (copy_from_user(&ev, buf, count)) return -EFAULT; @@ -1322,31 +1328,47 @@ static int rfkill_fop_release(struct inode *inode, struct file *file) return 0; } -#ifdef CONFIG_RFKILL_INPUT static long rfkill_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct rfkill_data *data = file->private_data; + int ret = -ENOSYS; + u32 size; if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC) return -ENOSYS; - if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT) - return -ENOSYS; - mutex_lock(&data->mtx); - - if (!data->input_handler) { - if (atomic_inc_return(&rfkill_input_disabled) == 1) - printk(KERN_DEBUG "rfkill: input handler disabled\n"); - data->input_handler = true; + switch (_IOC_NR(cmd)) { +#ifdef CONFIG_RFKILL_INPUT + case RFKILL_IOC_NOINPUT: + if (!data->input_handler) { + if (atomic_inc_return(&rfkill_input_disabled) == 1) + printk(KERN_DEBUG "rfkill: input handler disabled\n"); + data->input_handler = true; + } + ret = 0; + break; +#endif + case RFKILL_IOC_MAX_SIZE: + if (get_user(size, (__u32 __user *)arg)) { + ret = -EFAULT; + break; + } + if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) { + ret = -EINVAL; + break; + } + data->max_size = size; + ret = 0; + break; + default: + break; } - mutex_unlock(&data->mtx); - return 0; + return ret; } -#endif static const struct file_operations rfkill_fops = { .owner = THIS_MODULE, @@ -1355,10 +1377,8 @@ static const struct file_operations rfkill_fops = { .write = rfkill_fop_write, .poll = rfkill_fop_poll, .release = rfkill_fop_release, -#ifdef CONFIG_RFKILL_INPUT .unlocked_ioctl = rfkill_fop_ioctl, .compat_ioctl = compat_ptr_ioctl, -#endif .llseek = no_llseek, }; -- cgit v1.2.3-71-gd317 From 73799a889262b4675799bec20a2765be6d6a3f98 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 15 Mar 2022 15:38:54 -0400 Subject: counter: add new
COUNTER_EVENT_CHANGE_OF_STATE Add new counter event to notify user space about every new counter pulse. Link: https://lore.kernel.org/r/20220203135727.2374052-2-o.rempel@pengutronix.de Signed-off-by: Oleksij Rempel Signed-off-by: William Breathitt Gray Link: https://lore.kernel.org/r/486a5de67414470449efb84d06a2f2214f4bb31d.1647373009.git.vilhelm.gray@gmail.com Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/counter.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h index d0aa95aeff7b..96c5ffd368ad 100644 --- a/include/uapi/linux/counter.h +++ b/include/uapi/linux/counter.h @@ -61,6 +61,8 @@ enum counter_event_type { COUNTER_EVENT_THRESHOLD, /* Index signal detected */ COUNTER_EVENT_INDEX, + /* State of counter is changed */ + COUNTER_EVENT_CHANGE_OF_STATE, }; /** -- cgit v1.2.3-71-gd317 From 336d4b814bf078fa698488632c19beca47308896 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 27 Jan 2022 12:15:32 -0600 Subject: ptrace: Move setting/clearing ptrace_message into ptrace_stop Today ptrace_message is easy to overlook as it is not a core part of ptrace_stop. It has been overlooked so much that there are places that set ptrace_message and don't clear it, and places that never set it. So if you get an unlucky sequence of events the ptracer may be able to read a ptrace_message that does not apply to the current ptrace stop. Move setting of ptrace_message into ptrace_stop so that it always gets set before the stop, and always gets cleared after the stop. This prevents nonsense from being reported to userspace and makes ptrace_message more visible in the ptrace helper functions so that kernel developers can see it. Link: https://lkml.kernel.org/r/87bky67qfv.fsf_-_@email.froward.int.ebiederm.org Acked-by: Oleg Nesterov Reviewed-by: Kees Cook Signed-off-by: "Eric W. Biederman" --- include/linux/ptrace.h | 9 +++------ include/uapi/linux/ptrace.h | 2 +- kernel/signal.c | 21 ++++++++++++--------- 3 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 5310f43e4762..3e6b46e2b7be 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -60,7 +60,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned extern void ptrace_disable(struct task_struct *); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); -extern void ptrace_notify(int exit_code); +extern void ptrace_notify(int exit_code, unsigned long message); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, const struct cred *ptracer_cred); @@ -155,8 +155,7 @@ static inline bool ptrace_event_enabled(struct task_struct *task, int event) static inline void ptrace_event(int event, unsigned long message) { if (unlikely(ptrace_event_enabled(current, event))) { - current->ptrace_message = message; - ptrace_notify((event << 8) | SIGTRAP); + ptrace_notify((event << 8) | SIGTRAP, message); } else if (event == PTRACE_EVENT_EXEC) { /* legacy EXEC report via SIGTRAP */ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) @@ -424,8 +423,7 @@ static inline int ptrace_report_syscall(unsigned long message) if (!(ptrace & PT_PTRACED)) return 0; - current->ptrace_message = message; - ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ?
0x80 : 0), message); /* * this isn't the same as continuing with a signal, but it will do @@ -437,7 +435,6 @@ static inline int ptrace_report_syscall(unsigned long message) current->exit_code = 0; } - current->ptrace_message = 0; return fatal_signal_pending(current); } diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index b7af92e07d1f..195ae64a8c87 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -114,7 +114,7 @@ struct ptrace_rseq_configuration { /* * These values are stored in task->ptrace_message - * by ptrace_report_syscall_* to describe the current syscall-stop. + * by ptrace_stop to describe the current syscall-stop. */ #define PTRACE_EVENTMSG_SYSCALL_ENTRY 1 #define PTRACE_EVENTMSG_SYSCALL_EXIT 2 diff --git a/kernel/signal.c b/kernel/signal.c index c2dee5420567..a49ac7149256 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2191,7 +2191,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, * If we actually decide not to stop at all because the tracer * is gone, we keep current->exit_code unless clear_code. */ -static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) +static void ptrace_stop(int exit_code, int why, int clear_code, + unsigned long message, kernel_siginfo_t *info) __releases(¤t->sighand->siglock) __acquires(¤t->sighand->siglock) { @@ -2237,6 +2238,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t */ smp_wmb(); + current->ptrace_message = message; current->last_siginfo = info; current->exit_code = exit_code; @@ -2315,6 +2317,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t */ spin_lock_irq(¤t->sighand->siglock); current->last_siginfo = NULL; + current->ptrace_message = 0; /* LISTENING can be set only during STOP traps, clear it */ current->jobctl &= ~JOBCTL_LISTENING; @@ -2327,7 +2330,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t recalc_sigpending_tsk(current); } -static void ptrace_do_notify(int signr, int exit_code, int why) +static void ptrace_do_notify(int signr, int exit_code, int why, unsigned long message) { kernel_siginfo_t info; @@ -2338,17 +2341,17 @@ static void ptrace_do_notify(int signr, int exit_code, int why) info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); /* Let the debugger run. */ - ptrace_stop(exit_code, why, 1, &info); + ptrace_stop(exit_code, why, 1, message, &info); } -void ptrace_notify(int exit_code) +void ptrace_notify(int exit_code, unsigned long message) { BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); if (unlikely(task_work_pending(current))) task_work_run(); spin_lock_irq(¤t->sighand->siglock); - ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); + ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message); spin_unlock_irq(¤t->sighand->siglock); } @@ -2504,10 +2507,10 @@ static void do_jobctl_trap(void) signr = SIGTRAP; WARN_ON_ONCE(!signr); ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), - CLD_STOPPED); + CLD_STOPPED, 0); } else { WARN_ON_ONCE(!signr); - ptrace_stop(signr, CLD_STOPPED, 0, NULL); + ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL); current->exit_code = 0; } } @@ -2561,7 +2564,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type) * comment in dequeue_signal(). */ current->jobctl |= JOBCTL_STOP_DEQUEUED; - ptrace_stop(signr, CLD_TRAPPED, 0, info); + ptrace_stop(signr, CLD_TRAPPED, 0, 0, info); /* We're back. Did the debugger cancel the sig? 
*/ signr = current->exit_code; @@ -2891,7 +2894,7 @@ static void signal_delivered(struct ksignal *ksig, int stepping) if (current->sas_ss_flags & SS_AUTODISARM) sas_ss_reset(current); if (stepping) - ptrace_notify(SIGTRAP); + ptrace_notify(SIGTRAP, 0); } void signal_setup_done(int failed, struct ksignal *ksig, int stepping) -- cgit v1.2.3-71-gd317 From ee2a098851bfbe8bcdd964c0121f4246f00ff41e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 14 Mar 2022 11:20:41 -0700 Subject: bpf: Adjust BPF stack helper functions to accommodate skip > 0 Let's say that the caller has storage for num_elem stack frames. Then, the BPF stack helper functions walk the stack for only num_elem frames. This means that if skip > 0, one keeps only 'num_elem - skip' frames. This is because it sets init_nr in the perf_callchain_entry to the end of the buffer to save num_elem entries only. I believe it was because the perf callchain code unwound the stack frames until it reached the global max size (sysctl_perf_event_max_stack). However, it now has perf_callchain_entry_ctx.max_stack to limit the iteration locally. This simplifies the code to handle init_nr in the BPF callstack entries and removes the confusion with the perf_event's __PERF_SAMPLE_CALLCHAIN_EARLY which sets init_nr to 0. Also change the comment on bpf_get_stack() in the header file to be more explicit about what the return value means. Fixes: c195651e565a ("bpf: add bpf_get_stack helper") Signed-off-by: Namhyung Kim Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/30a7b5d5-6726-1cc2-eaee-8da2828a9a9c@oracle.com Link: https://lore.kernel.org/bpf/20220314182042.71025-1-namhyung@kernel.org Based-on-patch-by: Eugene Loh --- include/uapi/linux/bpf.h | 8 +++---- kernel/bpf/stackmap.c | 56 +++++++++++++++++++++--------------------------- 2 files changed, 28 insertions(+), 36 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7604e7d5438f..d14b10b85e51 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3009,8 +3009,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description @@ -4316,8 +4316,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure.
* * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) * Description diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 38bdfcd06f55..34725bfa1e97 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -176,7 +176,7 @@ build_id_valid: } static struct perf_callchain_entry * -get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) { #ifdef CONFIG_STACKTRACE struct perf_callchain_entry *entry; @@ -187,9 +187,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) if (!entry) return NULL; - entry->nr = init_nr + - stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), - sysctl_perf_event_max_stack - init_nr, 0); + entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip, + max_depth, 0); /* stack_trace_save_tsk() works on unsigned long array, while * perf_callchain_entry uses u64 array. For 32-bit systems, it is @@ -201,7 +200,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) int i; /* copy data from the end to avoid using extra buffer */ - for (i = entry->nr - 1; i >= (int)init_nr; i--) + for (i = entry->nr - 1; i >= 0; i--) to[i] = (u64)(from[i]); } @@ -218,27 +217,19 @@ static long __bpf_get_stackid(struct bpf_map *map, { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); struct stack_map_bucket *bucket, *new_bucket, *old_bucket; - u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; u32 hash, id, trace_nr, trace_len; bool user = flags & BPF_F_USER_STACK; u64 *ips; bool hash_matches; - /* get_perf_callchain() guarantees that trace->nr >= init_nr - * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth - */ - trace_nr = trace->nr - init_nr; - - if (trace_nr <= skip) + if (trace->nr <= skip) /* skipping more than usable stack trace */ return -EFAULT; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_len = trace_nr * sizeof(u64); - ips = trace->ip + skip + init_nr; + ips = trace->ip + skip; hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0); id = hash & (smap->n_buckets - 1); bucket = READ_ONCE(smap->buckets[id]); @@ -295,8 +286,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; + u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; struct perf_callchain_entry *trace; bool kernel = !user; @@ -305,8 +295,12 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) return -EINVAL; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + max_depth += skip; + if (max_depth > sysctl_perf_event_max_stack) + max_depth = sysctl_perf_event_max_stack; + + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, + false, false); if (unlikely(!trace)) /* couldn't fetch the stack trace */ @@ -397,7 +391,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, struct perf_callchain_entry *trace_in, void *buf, u32 size, u64 flags) { - u32 init_nr, trace_nr, copy_len, 
elem_size, num_elem; + u32 trace_nr, copy_len, elem_size, num_elem, max_depth; bool user_build_id = flags & BPF_F_USER_BUILD_ID; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; @@ -422,30 +416,28 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, goto err_fault; num_elem = size / elem_size; - if (sysctl_perf_event_max_stack < num_elem) - init_nr = 0; - else - init_nr = sysctl_perf_event_max_stack - num_elem; + max_depth = num_elem + skip; + if (sysctl_perf_event_max_stack < max_depth) + max_depth = sysctl_perf_event_max_stack; if (trace_in) trace = trace_in; else if (kernel && task) - trace = get_callchain_entry_for_task(task, init_nr); + trace = get_callchain_entry_for_task(task, max_depth); else - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, false, false); if (unlikely(!trace)) goto err_fault; - trace_nr = trace->nr - init_nr; - if (trace_nr < skip) + if (trace->nr < skip) goto err_fault; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem; copy_len = trace_nr * elem_size; - ips = trace->ip + skip + init_nr; + + ips = trace->ip + skip; if (user && user_build_id) stack_map_get_build_id_offset(buf, ips, trace_nr, user); else -- cgit v1.2.3-71-gd317 From 6d8491910fcd3324d0f0ece3bd68e85ead3a04d7 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 1 Mar 2022 06:03:47 +0000 Subject: KVM: x86: Introduce KVM_CAP_DISABLE_QUIRKS2 KVM_CAP_DISABLE_QUIRKS is irrevocably broken. The capability does not advertise the set of quirks which may be disabled to userspace, so it is impossible to predict the behavior of KVM. Worse yet, KVM_CAP_DISABLE_QUIRKS will tolerate any value for cap->args[0], meaning it fails to reject attempts to set invalid quirk bits. The only valid workaround for the quirky quirks API is to add a new CAP. Actually advertise the set of quirks that can be disabled to userspace so it can predict KVM's behavior. Reject values for cap->args[0] that contain invalid bits. Finally, add documentation for the new capability and describe the existing quirks. Signed-off-by: Oliver Upton Message-Id: <20220301060351.442881-5-oupton@google.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 50 +++++++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/kvm_host.h | 7 ++++++ arch/x86/kvm/x86.c | 8 +++++++ include/uapi/linux/kvm.h | 1 + 4 files changed, 66 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 55efa82e37eb..07a45474abe9 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -7083,6 +7083,56 @@ resource that is controlled with the H_SET_MODE hypercall. This capability allows a guest kernel to use a better-performance mode for handling interrupts and system calls. +7.31 KVM_CAP_DISABLE_QUIRKS2 +---------------------------- + +:Capability: KVM_CAP_DISABLE_QUIRKS2 +:Parameters: args[0] - set of KVM quirks to disable +:Architectures: x86 +:Type: vm + +This capability, if enabled, will cause KVM to disable some behavior +quirks. + +Calling KVM_CHECK_EXTENSION for this capability returns a bitmask of +quirks that can be disabled in KVM. + +The argument to KVM_ENABLE_CAP for this capability is a bitmask of +quirks to disable, and must be a subset of the bitmask returned by +KVM_CHECK_EXTENSION. 
+ + The valid bits in cap.args[0] are: + +=================================== ============================================ + KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT + LINT0 register is 0x700 (APIC_MODE_EXTINT). + When this quirk is disabled, the reset value + is 0x10000 (APIC_LVT_MASKED). + + KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW. + When this quirk is disabled, KVM does not + change the value of CR0.CD and CR0.NW. + + KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is + available even when configured for x2APIC + mode. When this quirk is disabled, KVM + disables the MMIO LAPIC interface if the + LAPIC is in x2APIC mode. + + KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before + exiting to userspace for an OUT instruction + to port 0x7e. When this quirk is disabled, + KVM does not pre-increment %rip before + exiting to userspace. + + KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets + CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if + IA32_MISC_ENABLE[bit 18] (MWAIT) is set. + Additionally, when this quirk is disabled, + KVM clears CPUID.01H:ECX[bit 3] if + IA32_MISC_ENABLE[bit 18] is cleared. +=================================== ============================================ + 8. Other capabilities. ====================== diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3a2c855f04e3..0ddc2e67a731 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1966,4 +1966,11 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); #define KVM_CLOCK_VALID_FLAGS \ (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC) +#define KVM_X86_VALID_QUIRKS \ + (KVM_X86_QUIRK_LINT0_REENABLED | \ + KVM_X86_QUIRK_CD_NW_CLEARED | \ + KVM_X86_QUIRK_LAPIC_MMIO_HOLE | \ + KVM_X86_QUIRK_OUT_7E_INC_RIP | \ + KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) + #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c58265d9f1b2..fe2171b11441 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4363,6 +4363,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0; break; } + case KVM_CAP_DISABLE_QUIRKS2: + r = KVM_X86_VALID_QUIRKS; + break; default: break; } @@ -5909,6 +5912,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, return -EINVAL; switch (cap->cap) { + case KVM_CAP_DISABLE_QUIRKS2: + r = -EINVAL; + if (cap->args[0] & ~KVM_X86_VALID_QUIRKS) + break; + fallthrough; case KVM_CAP_DISABLE_QUIRKS: kvm->arch.disabled_quirks = cap->args[0]; r = 0; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d2f1efc3aa35..91a6fe4e02c0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1143,6 +1143,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_AIL_MODE_3 210 #define KVM_CAP_S390_MEM_OP_EXTENSION 211 #define KVM_CAP_PMU_CAPABILITY 212 +#define KVM_CAP_DISABLE_QUIRKS2 213 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3-71-gd317 From 824ddc601adc2cc48efb7f58b57997986c1c1276 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Tue, 22 Mar 2022 14:45:32 -0700 Subject: userfaultfd: provide unmasked address on page-fault Userfaultfd is supposed to provide the full address (i.e., unmasked) of the faulting access back to userspace. However, that has not been the case for quite some time. Even running "userfaultfd_demo" from the userfaultfd man page provides the wrong output (and contradicts the man page).
Notice that "UFFD_EVENT_PAGEFAULT event" shows the masked address (7fc5e30b3000) and not the first read address (0x7fc5e30b300f). Address returned by mmap() = 0x7fc5e30b3000 fault_handler_thread(): poll() returns: nready = 1; POLLIN = 1; POLLERR = 0 UFFD_EVENT_PAGEFAULT event: flags = 0; address = 7fc5e30b3000 (uffdio_copy.copy returned 4096) Read address 0x7fc5e30b300f in main(): A Read address 0x7fc5e30b340f in main(): A Read address 0x7fc5e30b380f in main(): A Read address 0x7fc5e30b3c0f in main(): A The exact address is useful for various reasons and specifically for prefetching decisions. If it is known that the memory is populated by certain objects whose size is not page-aligned, then based on the faulting address, the uffd-monitor can decide whether to prefetch and prefault the adjacent page. This bug has been in the kernel for quite some time: since commit 1a29d85eb0f1 ("mm: use vmf->address instead of vmf->virtual_address"), which dates back to 2016. A concern has been raised that existing userspace applications might rely on the old/wrong behavior in which the address is masked. Therefore, it was suggested to provide the masked address unless the user explicitly asks for the exact address. Add a new userfaultfd feature UFFD_FEATURE_EXACT_ADDRESS to direct userfaultfd to provide the exact address. Add a new "real_address" field to vmf to hold the unmasked address. Provide the address to userspace accordingly. Initialize real_address in various code-paths to be consistent with address, even when it is not used, to be on the safe side. [namit@vmware.com: initialize real_address on all code paths, per Jan] Link: https://lkml.kernel.org/r/20220226022655.350562-1-namit@vmware.com [akpm@linux-foundation.org: fix typo in comment, per Jan] Link: https://lkml.kernel.org/r/20220218041003.3508-1-namit@vmware.com Signed-off-by: Nadav Amit Acked-by: Peter Xu Reviewed-by: David Hildenbrand Acked-by: Mike Rapoport Reviewed-by: Jan Kara Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/userfaultfd.c | 5 ++++- include/linux/mm.h | 3 ++- include/uapi/linux/userfaultfd.h | 8 +++++++- mm/hugetlb.c | 6 ++++-- mm/memory.c | 1 + mm/swapfile.c | 1 + 6 files changed, 19 insertions(+), 5 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 8e03b3d3f5fa..aa0c47cb0d16 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -198,6 +198,9 @@ static inline struct uffd_msg userfault_msg(unsigned long address, struct uffd_msg msg; msg_init(&msg); msg.event = UFFD_EVENT_PAGEFAULT; + + if (!(features & UFFD_FEATURE_EXACT_ADDRESS)) + address &= PAGE_MASK; msg.arg.pagefault.address = address; /* * These flags indicate why the userfault occurred: @@ -482,7 +485,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); uwq.wq.private = current; - uwq.msg = userfault_msg(vmf->address, vmf->flags, reason, + uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason, ctx->features); uwq.ctx = ctx; uwq.waken = false; diff --git a/include/linux/mm.h b/include/linux/mm.h index 9d58321386cc..0e4fd101616e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -478,7 +478,8 @@ struct vm_fault { struct vm_area_struct *vma; /* Target VMA */ gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ - unsigned long address; /* Faulting virtual address */ + unsigned long address;
/* Faulting virtual address - masked */ + unsigned long real_address; /* Faulting virtual address - unmasked */ }; enum fault_flag flags; /* FAULT_FLAG_xxx flags * XXX: should really be 'const' */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 05b31d60acf6..ef739054cb1c 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -32,7 +32,8 @@ UFFD_FEATURE_SIGBUS | \ UFFD_FEATURE_THREAD_ID | \ UFFD_FEATURE_MINOR_HUGETLBFS | \ - UFFD_FEATURE_MINOR_SHMEM) + UFFD_FEATURE_MINOR_SHMEM | \ + UFFD_FEATURE_EXACT_ADDRESS) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -189,6 +190,10 @@ struct uffdio_api { * * UFFD_FEATURE_MINOR_SHMEM indicates the same support as * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead. + * + * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page + * faults would be provided and the offset within the page would not be + * masked. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -201,6 +206,7 @@ struct uffdio_api { #define UFFD_FEATURE_THREAD_ID (1<<8) #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) #define UFFD_FEATURE_MINOR_SHMEM (1<<10) +#define UFFD_FEATURE_EXACT_ADDRESS (1<<11) __u64 features; __u64 ioctls; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f425147e4ce3..75b41879e9e9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5341,6 +5341,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, pgoff_t idx, unsigned int flags, unsigned long haddr, + unsigned long addr, unsigned long reason) { vm_fault_t ret; @@ -5348,6 +5349,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, struct vm_fault vmf = { .vma = vma, .address = haddr, + .real_address = addr, .flags = flags, /* @@ -5416,7 +5418,7 @@ retry: /* Check for page in userfault range */ if (userfaultfd_missing(vma)) { ret = hugetlb_handle_userfault(vma, mapping, idx, - flags, haddr, + flags, haddr, address, VM_UFFD_MISSING); goto out; } @@ -5480,7 +5482,7 @@ retry: unlock_page(page); put_page(page); ret = hugetlb_handle_userfault(vma, mapping, idx, - flags, haddr, + flags, haddr, address, VM_UFFD_MINOR); goto out; } diff --git a/mm/memory.c b/mm/memory.c index 1a55b4c5b5db..e0f3410fa70c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4633,6 +4633,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, struct vm_fault vmf = { .vma = vma, .address = address & PAGE_MASK, + .real_address = address, .flags = flags, .pgoff = linear_page_index(vma, address), .gfp_mask = __get_fault_gfp_mask(vma), diff --git a/mm/swapfile.c b/mm/swapfile.c index bf0df7aa7158..33c7abb16610 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1951,6 +1951,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, struct vm_fault vmf = { .vma = vma, .address = addr, + .real_address = addr, .pmd = pmd, }; -- cgit v1.2.3-71-gd317 From e99653afeb9585350644c5ae4b0ca987cbe8d053 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 9 Mar 2022 17:22:53 +0100 Subject: rtc: add new RTC_FEATURE_ALARM_WAKEUP_ONLY feature Some RTCs have an IRQ pin that is not connected to a CPU interrupt but rather directly to a PMIC or power supply. In that case, it is still useful to be able to set alarms but we shouldn't expect interrupts. 
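To make the intended use concrete: userspace can read the feature bitmap through the RTC_PARAM_GET ioctl and decide up front whether waiting on alarm interrupts makes sense. A minimal sketch, not part of the patch; it assumes headers carrying this new flag, and /dev/rtc0 plus the terse error handling are illustrative:

/* Sketch: detect an alarm that can wake the system but never raises
 * a CPU interrupt, using the rtc_param interface from <linux/rtc.h>. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_param param = { .param = RTC_PARAM_FEATURES };
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_PARAM_GET, &param) < 0) {
		close(fd);
		return 1;
	}
	if (param.uvalue & (1ULL << RTC_FEATURE_ALARM_WAKEUP_ONLY))
		printf("alarm can wake the system; do not wait for RTC_AIE interrupts\n");
	close(fd);
	return 0;
}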
Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20220309162301.61679-22-alexandre.belloni@bootlin.com --- include/uapi/linux/rtc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h index 03e5b776e597..97aca4503a6a 100644 --- a/include/uapi/linux/rtc.h +++ b/include/uapi/linux/rtc.h @@ -133,7 +133,8 @@ struct rtc_param { #define RTC_FEATURE_UPDATE_INTERRUPT 4 #define RTC_FEATURE_CORRECTION 5 #define RTC_FEATURE_BACKUP_SWITCH_MODE 6 -#define RTC_FEATURE_CNT 7 +#define RTC_FEATURE_ALARM_WAKEUP_ONLY 7 +#define RTC_FEATURE_CNT 8 /* parameter list */ #define RTC_PARAM_FEATURES 0 -- cgit v1.2.3-71-gd317 From c724c866bb70cb8c607081a26823a1f0ebde4387 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 23 Mar 2022 16:05:29 -0700 Subject: linux/types.h: remove unnecessary __bitwise__ There are no users of "__bitwise__" except the definition of "__bitwise". Remove __bitwise__ and define __bitwise directly. This is a follow-up to 05de97003c77 ("linux/types.h: enable endian checks for all sparse builds"). [akpm@linux-foundation.org: change the tools/include/linux/types.h definition also] Link: https://lkml.kernel.org/r/20220310220927.245704-2-helgaas@kernel.org Signed-off-by: Bjorn Helgaas Cc: Michael S. Tsirkin Cc: Jonathan Corbet Cc: Nathan Chancellor Cc: Nick Desaulniers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/uapi/linux/types.h | 5 ++--- tools/include/linux/types.h | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index f6d2f83cbe29..71696f424ac8 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -20,11 +20,10 @@ */ #ifdef __CHECKER__ -#define __bitwise__ __attribute__((bitwise)) +#define __bitwise __attribute__((bitwise)) #else -#define __bitwise__ +#define __bitwise #endif -#define __bitwise __bitwise__ typedef __u16 __bitwise __le16; typedef __u16 __bitwise __be16; diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h index 6e14a533ab4e..6c18c54e7d7f 100644 --- a/tools/include/linux/types.h +++ b/tools/include/linux/types.h @@ -43,11 +43,10 @@ typedef __u8 u8; typedef __s8 s8; #ifdef __CHECKER__ -#define __bitwise__ __attribute__((bitwise)) +#define __bitwise __attribute__((bitwise)) #else -#define __bitwise__ +#define __bitwise #endif -#define __bitwise __bitwise__ #define __force #define __user -- cgit v1.2.3-71-gd317 From 179fd6ba3bacbf7b19cbdf9b14be109d54318394 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 23 Mar 2022 16:05:32 -0700 Subject: Documentation/sparse: add hints about __CHECKER__ Several attributes depend on __CHECKER__, but previously there was no clue in the tree about when __CHECKER__ might be defined. Add hints at the most common places (__kernel, __user, __iomem, __bitwise) and in the sparse documentation. Link: https://lkml.kernel.org/r/20220310220927.245704-3-helgaas@kernel.org Signed-off-by: Bjorn Helgaas Cc: Jonathan Corbet Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: "Michael S . 
Tsirkin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/dev-tools/sparse.rst | 2 ++ include/linux/compiler_types.h | 1 + include/uapi/linux/types.h | 1 + 3 files changed, 4 insertions(+) (limited to 'include/uapi/linux') diff --git a/Documentation/dev-tools/sparse.rst b/Documentation/dev-tools/sparse.rst index 02102be7ff49..dc791c8d84d1 100644 --- a/Documentation/dev-tools/sparse.rst +++ b/Documentation/dev-tools/sparse.rst @@ -100,3 +100,5 @@ have already built it. The optional make variable CF can be used to pass arguments to sparse. The build system passes -Wbitwise to sparse automatically. + +Note that sparse defines the __CHECKER__ preprocessor symbol. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3c1795fdb568..4b3915ce38a4 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -4,6 +4,7 @@ #ifndef __ASSEMBLY__ +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ #ifdef __CHECKER__ /* address spaces */ # define __kernel __attribute__((address_space(0))) diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index 71696f424ac8..c4dc597f3dcf 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -19,6 +19,7 @@ * any application/library that wants linux/types.h. */ +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ #ifdef __CHECKER__ #define __bitwise __attribute__((bitwise)) #else -- cgit v1.2.3-71-gd317 From 7ef66d186eb95f987a97fb3329b65c840e2dc9bf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 24 Mar 2022 06:53:18 -0600 Subject: io_uring: remove IORING_CQE_F_MSG This was introduced with the message ring opcode, but isn't strictly required for the request itself. The sender can encode what is needed in user_data, which is passed to the receiver. It's unclear if having a separate flag that essentially says "This CQE did not originate from an SQE on this ring" provides any real utility to applications. While we can always re-introduce a flag to provide this information, we cannot take it away at a later point in time. Remove the flag while we still can, before it's in a released kernel. 
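With the flag gone, a receiver that still needs to tell message CQEs apart can reserve a bit of user_data by convention, which is exactly the "encode what is needed in user_data" approach described above. A rough sketch; it assumes liburing (io_uring_prep_msg_ring() appeared around liburing 2.2), and the tag bit is an application-side convention, not kernel API:

/* Sketch: tag MSG_RING completions in user_data itself. */
#include <liburing.h>

#define APP_MSG_TAG (1ULL << 63)	/* application-chosen marker bit */

static int send_ring_msg(struct io_uring *src, int target_ring_fd,
			 unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(src);

	if (!sqe)
		return -1;
	/* user_data is delivered verbatim in the target ring's CQE */
	io_uring_prep_msg_ring(sqe, target_ring_fd, len, APP_MSG_TAG | 42, 0);
	return io_uring_submit(src);
}

static int cqe_is_msg(const struct io_uring_cqe *cqe)
{
	/* replaces the removed IORING_CQE_F_MSG test */
	return (cqe->user_data & APP_MSG_TAG) != 0;
}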
Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 +-- include/uapi/linux/io_uring.h | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include/uapi/linux') diff --git a/fs/io_uring.c b/fs/io_uring.c index 88556e654c5a..28b7a1b8abb6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4474,8 +4474,7 @@ static int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) target_ctx = req->file->private_data; spin_lock(&target_ctx->completion_lock); - filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, - IORING_CQE_F_MSG); + filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0); io_commit_cqring(target_ctx); spin_unlock(&target_ctx->completion_lock); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index d2be4eb22008..784adc6f6ed2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -201,11 +201,9 @@ struct io_uring_cqe { * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries - * IORING_CQE_F_MSG If set, CQE was generated with IORING_OP_MSG_RING */ #define IORING_CQE_F_BUFFER (1U << 0) #define IORING_CQE_F_MORE (1U << 1) -#define IORING_CQE_F_MSG (1U << 2) enum { IORING_CQE_BUFFER_SHIFT = 16, -- cgit v1.2.3-71-gd317 From 90a6951b58e935124eeb7ecd9fbc2426f841ac0c Mon Sep 17 00:00:00 2001 From: Gautam Dawar Date: Tue, 15 Feb 2022 11:04:29 +0530 Subject: Add definition of VIRTIO_F_IN_ORDER feature bit This patch adds the definition of the VIRTIO_F_IN_ORDER feature bit in the relevant header file to make it available in QEMU's linux standard header file virtio_config.h, which is updated using scripts/update-linux-headers.sh Signed-off-by: Gautam Dawar Link: https://lore.kernel.org/r/20220215053430.24650-1-gdawar@xilinx.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- include/uapi/linux/virtio_config.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index b5eda06f0d57..f0fb0ae021c0 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -82,6 +82,12 @@ /* This feature indicates support for the packed virtqueue layout. */ #define VIRTIO_F_RING_PACKED 34 +/* + * Inorder feature indicates that all buffers are used by the device + * in the same order in which they have been made available. + */ +#define VIRTIO_F_IN_ORDER 35 + /* * This feature indicates that memory accesses by the driver and the * device are ordered in a way described by the platform. -- cgit v1.2.3-71-gd317 From 13d640a3e9a3ac7ec694843d3d3b785e85fb8cb8 Mon Sep 17 00:00:00 2001 From: zhenwei pi Date: Wed, 2 Mar 2022 11:39:14 +0800 Subject: virtio_crypto: Introduce VIRTIO_CRYPTO_NOSPC Based on the latest virtio crypto spec, define VIRTIO_CRYPTO_NOSPC. Reviewed-by: Gonglei Signed-off-by: zhenwei pi Link: https://lore.kernel.org/r/20220302033917.1295334-2-pizhenwei@bytedance.com Signed-off-by: Michael S.
Tsirkin --- include/uapi/linux/virtio_crypto.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h index a03932f10565..1166a49084b0 100644 --- a/include/uapi/linux/virtio_crypto.h +++ b/include/uapi/linux/virtio_crypto.h @@ -408,6 +408,7 @@ struct virtio_crypto_op_data_req { #define VIRTIO_CRYPTO_BADMSG 2 #define VIRTIO_CRYPTO_NOTSUPP 3 #define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ +#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */ /* The accelerator hardware is ready */ #define VIRTIO_CRYPTO_S_HW_READY (1 << 0) -- cgit v1.2.3-71-gd317 From 24e19590628b58578748eeaec8140bf9c9dc00d9 Mon Sep 17 00:00:00 2001 From: zhenwei pi Date: Wed, 2 Mar 2022 11:39:15 +0800 Subject: virtio-crypto: introduce akcipher service Introduce asymmetric service definition, asymmetric operations and several well known algorithms. Co-developed-by: lei he Signed-off-by: lei he Signed-off-by: zhenwei pi Link: https://lore.kernel.org/r/20220302033917.1295334-3-pizhenwei@bytedance.com Signed-off-by: Michael S. Tsirkin Reviewed-by: Gonglei --- include/uapi/linux/virtio_crypto.h | 81 +++++++++++++++++++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h index 1166a49084b0..71a54a6849ca 100644 --- a/include/uapi/linux/virtio_crypto.h +++ b/include/uapi/linux/virtio_crypto.h @@ -37,6 +37,7 @@ #define VIRTIO_CRYPTO_SERVICE_HASH 1 #define VIRTIO_CRYPTO_SERVICE_MAC 2 #define VIRTIO_CRYPTO_SERVICE_AEAD 3 +#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4 #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op)) @@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) +#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04) +#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05) __le32 opcode; __le32 algo; __le32 flag; @@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req { __u8 padding[32]; }; +struct virtio_crypto_rsa_session_para { +#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0 +#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1 + __le32 padding_algo; + +#define VIRTIO_CRYPTO_RSA_NO_HASH 0 +#define VIRTIO_CRYPTO_RSA_MD2 1 +#define VIRTIO_CRYPTO_RSA_MD3 2 +#define VIRTIO_CRYPTO_RSA_MD4 3 +#define VIRTIO_CRYPTO_RSA_MD5 4 +#define VIRTIO_CRYPTO_RSA_SHA1 5 +#define VIRTIO_CRYPTO_RSA_SHA256 6 +#define VIRTIO_CRYPTO_RSA_SHA384 7 +#define VIRTIO_CRYPTO_RSA_SHA512 8 +#define VIRTIO_CRYPTO_RSA_SHA224 9 + __le32 hash_algo; +}; + +struct virtio_crypto_ecdsa_session_para { +#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0 +#define VIRTIO_CRYPTO_CURVE_NIST_P192 1 +#define VIRTIO_CRYPTO_CURVE_NIST_P224 2 +#define VIRTIO_CRYPTO_CURVE_NIST_P256 3 +#define VIRTIO_CRYPTO_CURVE_NIST_P384 4 +#define VIRTIO_CRYPTO_CURVE_NIST_P521 5 + __le32 curve_id; + __le32 padding; +}; + +struct virtio_crypto_akcipher_session_para { +#define VIRTIO_CRYPTO_NO_AKCIPHER 0 +#define VIRTIO_CRYPTO_AKCIPHER_RSA 1 +#define VIRTIO_CRYPTO_AKCIPHER_DSA 2 +#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3 + __le32 algo; + +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1 +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2 + __le32 keytype; + __le32 keylen; + + union { + struct 
virtio_crypto_rsa_session_para rsa; + struct virtio_crypto_ecdsa_session_para ecdsa; + } u; +}; + +struct virtio_crypto_akcipher_create_session_req { + struct virtio_crypto_akcipher_session_para para; + __u8 padding[36]; +}; + struct virtio_crypto_alg_chain_session_para { #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 @@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req { mac_create_session; struct virtio_crypto_aead_create_session_req aead_create_session; + struct virtio_crypto_akcipher_create_session_req + akcipher_create_session; struct virtio_crypto_destroy_session_req destroy_session; __u8 padding[56]; @@ -266,6 +325,14 @@ struct virtio_crypto_op_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) #define VIRTIO_CRYPTO_AEAD_DECRYPT \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00) +#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_SIGN \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02) +#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03) __le32 opcode; /* algo should be service-specific algorithms */ __le32 algo; @@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req { __u8 padding[32]; }; +struct virtio_crypto_akcipher_para { + __le32 src_data_len; + __le32 dst_data_len; +}; + +struct virtio_crypto_akcipher_data_req { + struct virtio_crypto_akcipher_para para; + __u8 padding[40]; +}; + /* The request of the data virtqueue's packet */ struct virtio_crypto_op_data_req { struct virtio_crypto_op_header header; @@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req { struct virtio_crypto_hash_data_req hash_req; struct virtio_crypto_mac_data_req mac_req; struct virtio_crypto_aead_data_req aead_req; + struct virtio_crypto_akcipher_data_req akcipher_req; __u8 padding[48]; } u; }; @@ -409,6 +487,7 @@ struct virtio_crypto_op_data_req { #define VIRTIO_CRYPTO_NOTSUPP 3 #define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ #define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */ +#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */ /* The accelerator hardware is ready */ #define VIRTIO_CRYPTO_S_HW_READY (1 << 0) @@ -439,7 +518,7 @@ struct virtio_crypto_config { __le32 max_cipher_key_len; /* Maximum length of authenticated key */ __le32 max_auth_key_len; - __le32 reserve; + __le32 akcipher_algo; /* Maximum size of each crypto request's content */ __le64 max_size; }; -- cgit v1.2.3-71-gd317 From a61280ddddaa45f95b60dd54c05f8e0e5b6810b7 Mon Sep 17 00:00:00 2001 From: Longpeng Date: Tue, 15 Mar 2022 11:25:51 +0800 Subject: vdpa: support exposing the config size to userspace - GET_CONFIG_SIZE: return the size of the virtio config space. The size contains the fields which are conditional on feature bits. Acked-by: Jason Wang Signed-off-by: Longpeng Link: https://lore.kernel.org/r/20220315032553.455-2-longpeng2@huawei.com Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Stefano Garzarella --- drivers/vhost/vdpa.c | 17 +++++++++++++++++ include/linux/vdpa.h | 3 ++- include/uapi/linux/vhost.h | 4 ++++ 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include/uapi/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 05f5fd2af58f..04375722c0e5 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -358,6 +358,20 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp) return 0; } +static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u32 size; + + size = ops->get_config_size(vdpa); + + if (copy_to_user(argp, &size, sizeof(size))) + return -EFAULT; + + return 0; +} + static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, void __user *argp) { @@ -495,6 +509,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, case VHOST_VDPA_GET_IOVA_RANGE: r = vhost_vdpa_get_iova_range(v, argp); break; + case VHOST_VDPA_GET_CONFIG_SIZE: + r = vhost_vdpa_get_config_size(v, argp); + break; default: r = vhost_dev_ioctl(&v->vdev, cmd, argp); if (r == -ENOIOCTLCMD) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 721089bb4c84..a5269191edda 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -207,7 +207,8 @@ struct vdpa_map_file { * @reset: Reset device * @vdev: vdpa device * Returns integer: success (0) or error (< 0) - * @get_config_size: Get the size of the configuration space + * @get_config_size: Get the size of the configuration space, including + * fields that are conditional on feature bits. * @vdev: vdpa device * Returns size_t: configuration size * @get_config: Read from device specific configuration space diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index c998860d7bbc..bc74e95a273a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -150,4 +150,8 @@ /* Get the valid iova range */ #define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ struct vhost_vdpa_iova_range) + +/* Get the config size */ +#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) + #endif -- cgit v1.2.3-71-gd317 From b04d910af330b55e1d5d6eb9ecd53a375a9cf81c Mon Sep 17 00:00:00 2001 From: Longpeng Date: Tue, 15 Mar 2022 11:25:53 +0800 Subject: vdpa: support exposing the count of vqs to userspace - GET_VQS_COUNT: the count of virtqueues that are exposed Signed-off-by: Longpeng Link: https://lore.kernel.org/r/20220315032553.455-4-longpeng2@huawei.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang Signed-off-by: Longpeng <longpeng2@huawei.com>
Reviewed-by: Stefano Garzarella --- drivers/vhost/vdpa.c | 13 +++++++++++++ include/uapi/linux/vhost.h | 3 +++ 2 files changed, 16 insertions(+) (limited to 'include/uapi/linux') diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 3b7fc2fe45f3..4c2f0bd06285 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -373,6 +373,16 @@ static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp) return 0; } +static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp) +{ + struct vdpa_device *vdpa = v->vdpa; + + if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs))) + return -EFAULT; + + return 0; +} + static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, void __user *argp) { @@ -513,6 +523,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, case VHOST_VDPA_GET_CONFIG_SIZE: r = vhost_vdpa_get_config_size(v, argp); break; + case VHOST_VDPA_GET_VQS_COUNT: + r = vhost_vdpa_get_vqs_count(v, argp); + break; default: r = vhost_dev_ioctl(&v->vdev, cmd, argp); if (r == -ENOIOCTLCMD) diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index bc74e95a273a..5d99e7c242a2 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -154,4 +154,7 @@ /* Get the config size */ #define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) +/* Get the count of all virtqueues */ +#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32) + #endif -- cgit v1.2.3-71-gd317 From f941c51eeac7ebe0f8ec30943bf78e7f60aad039 Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Tue, 29 Mar 2022 20:18:15 +0000 Subject: loop: fix ioctl calls using compat_loop_info Support for cryptoloop was deleted in commit 47e9624616c8 ("block: remove support for cryptoloop and the xor transfer"), making the usage of loop_info->lo_encrypt_type obsolete. However, this member was also removed from the compat_loop_info definition and this breaks userspace ioctl calls for 32-bit binaries and CONFIG_COMPAT=y. This patch restores the compat_loop_info->lo_encrypt_type member and marks it obsolete as well as in the uapi header definitions. 
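The breakage is purely a layout problem: dropping the 4-byte member shifts every later field that 32-bit userspace still reads and writes through the ioctl. A self-contained sketch of the effect (the struct names and the reduced field set are illustrative, not the kernel definitions):

/* Removing a field from the middle of an ioctl struct shifts every
 * later member, an ABI break even though the value is ignored. */
#include <stddef.h>
#include <stdint.h>

struct abi_with_field {		/* layout old 32-bit userspace was built against */
	int32_t lo_offset;
	int32_t lo_encrypt_type;	/* obsolete, ignored, but 4 bytes wide */
	int32_t lo_encrypt_key_size;
	int32_t lo_flags;
};

struct abi_without_field {	/* layout the regressed kernel briefly decoded */
	int32_t lo_offset;
	int32_t lo_encrypt_key_size;
	int32_t lo_flags;
};

/* lo_flags (and everything after it) lands 4 bytes earlier, so a
 * LOOP_SET_STATUS payload from old userspace is misinterpreted. */
_Static_assert(offsetof(struct abi_with_field, lo_flags) !=
	       offsetof(struct abi_without_field, lo_flags),
	       "removing the member shifts the layout");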
Fixes: 47e9624616c8 ("block: remove support for cryptoloop and the xor transfer") Signed-off-by: Carlos Llamas Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220329201815.1347500-1-cmllamas@google.com Signed-off-by: Jens Axboe --- drivers/block/loop.c | 1 + include/uapi/linux/loop.h | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/uapi/linux') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index e733c48de2e9..39e7650f89b1 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1592,6 +1592,7 @@ struct compat_loop_info { compat_ulong_t lo_inode; /* ioctl r/o */ compat_dev_t lo_rdevice; /* ioctl r/o */ compat_int_t lo_offset; + compat_int_t lo_encrypt_type; /* obsolete, ignored */ compat_int_t lo_encrypt_key_size; /* ioctl w/o */ compat_int_t lo_flags; /* ioctl r/o */ char lo_name[LO_NAME_SIZE]; diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 24a1c45bd1ae..98e60801195e 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -45,7 +45,7 @@ struct loop_info { unsigned long lo_inode; /* ioctl r/o */ __kernel_old_dev_t lo_rdevice; /* ioctl r/o */ int lo_offset; - int lo_encrypt_type; + int lo_encrypt_type; /* obsolete, ignored */ int lo_encrypt_key_size; /* ioctl w/o */ int lo_flags; char lo_name[LO_NAME_SIZE]; @@ -61,7 +61,7 @@ struct loop_info64 { __u64 lo_offset; __u64 lo_sizelimit;/* bytes, 0 == max available */ __u32 lo_number; /* ioctl r/o */ - __u32 lo_encrypt_type; + __u32 lo_encrypt_type; /* obsolete, ignored */ __u32 lo_encrypt_key_size; /* ioctl w/o */ __u32 lo_flags; __u8 lo_file_name[LO_NAME_SIZE]; -- cgit v1.2.3-71-gd317 From 1cd927ad6f62f27d8908498dcbf61395c5dd5fe2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 1 Apr 2022 14:39:03 -0400 Subject: tracing: mark user_events as BROKEN After being merged, user_events became more visible to a wider audience that has concerns with the current API. It is too late to fix this for this release, but instead of a full revert, just mark it as BROKEN (which prevents it from being selected in make config). Then we can work on finding a better API. If that fails, then it will need to be completely reverted. To not have the code silently bitrot, still allow building it with COMPILE_TEST. And to prevent the uapi header from being installed, then later changed, and then have an old distro user space see the old version, move the header file out of the uapi directory. Surround the include with CONFIG_COMPILE_TEST so that it uses the current location; when the BROKEN tag is taken off, it will use the uapi directory, and fail to compile. This is a good way to remind us to move the header back.
Link: https://lore.kernel.org/all/20220330155835.5e1f6669@gandalf.local.home Link: https://lkml.kernel.org/r/20220330201755.29319-1-mathieu.desnoyers@efficios.com Suggested-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) Signed-off-by: Linus Torvalds --- include/linux/user_events.h | 116 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/user_events.h | 116 --------------------------------------- kernel/trace/Kconfig | 1 + kernel/trace/trace_events_user.c | 5 ++ 4 files changed, 122 insertions(+), 116 deletions(-) create mode 100644 include/linux/user_events.h delete mode 100644 include/uapi/linux/user_events.h (limited to 'include/uapi/linux') diff --git a/include/linux/user_events.h b/include/linux/user_events.h new file mode 100644 index 000000000000..e570840571e1 --- /dev/null +++ b/include/linux/user_events.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, Microsoft Corporation. + * + * Authors: + * Beau Belgrave + */ +#ifndef _UAPI_LINUX_USER_EVENTS_H +#define _UAPI_LINUX_USER_EVENTS_H + +#include +#include + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +#define USER_EVENTS_SYSTEM "user_events" +#define USER_EVENTS_PREFIX "u:" + +/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */ +#define EVENT_BIT_FTRACE 0 +#define EVENT_BIT_PERF 1 +#define EVENT_BIT_OTHER 7 + +#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE) +#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF) +#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER) + +/* Create dynamic location entry within a 32-bit value */ +#define DYN_LOC(offset, size) ((size) << 16 | (offset)) + +/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */ +#define FLAG_BPF_ITER (1 << 0) + +/* + * Describes an event registration and stores the results of the registration. + * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum + * must set the size and name_args before invocation. + */ +struct user_reg { + + /* Input: Size of the user_reg structure being used */ + __u32 size; + + /* Input: Pointer to string with event name, description and flags */ + __u64 name_args; + + /* Output: Byte index of the event within the status page */ + __u32 status_index; + + /* Output: Index of the event to use when writing data */ + __u32 write_index; +}; + +#define DIAG_IOC_MAGIC '*' + +/* Requests to register a user_event */ +#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*) + +/* Requests to delete a user_event */ +#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) + +/* Data type that was passed to the BPF program */ +enum { + /* Data resides in kernel space */ + USER_BPF_DATA_KERNEL, + + /* Data resides in user space */ + USER_BPF_DATA_USER, + + /* Data is a pointer to a user_bpf_iter structure */ + USER_BPF_DATA_ITER, +}; + +/* + * Describes an iovec iterator that BPF programs can use to access data for + * a given user_event write() / writev() call. 
+ */ +struct user_bpf_iter { + + /* Offset of the data within the first iovec */ + __u32 iov_offset; + + /* Number of iovec structures */ + __u32 nr_segs; + + /* Pointer to iovec structures */ + const struct iovec *iov; +}; + +/* Context that BPF programs receive when attached to a user_event */ +struct user_bpf_context { + + /* Data type being passed (see union below) */ + __u32 data_type; + + /* Length of the data */ + __u32 data_len; + + /* Pointer to data, varies by data type */ + union { + /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */ + void *kdata; + + /* User data (data_type == USER_BPF_DATA_USER) */ + void *udata; + + /* Direct iovec (data_type == USER_BPF_DATA_ITER) */ + struct user_bpf_iter *iter; + }; +}; + +#endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h deleted file mode 100644 index e570840571e1..000000000000 --- a/include/uapi/linux/user_events.h +++ /dev/null @@ -1,116 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (c) 2021, Microsoft Corporation. - * - * Authors: - * Beau Belgrave - */ -#ifndef _UAPI_LINUX_USER_EVENTS_H -#define _UAPI_LINUX_USER_EVENTS_H - -#include -#include - -#ifdef __KERNEL__ -#include -#else -#include -#endif - -#define USER_EVENTS_SYSTEM "user_events" -#define USER_EVENTS_PREFIX "u:" - -/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */ -#define EVENT_BIT_FTRACE 0 -#define EVENT_BIT_PERF 1 -#define EVENT_BIT_OTHER 7 - -#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE) -#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF) -#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER) - -/* Create dynamic location entry within a 32-bit value */ -#define DYN_LOC(offset, size) ((size) << 16 | (offset)) - -/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */ -#define FLAG_BPF_ITER (1 << 0) - -/* - * Describes an event registration and stores the results of the registration. - * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum - * must set the size and name_args before invocation. - */ -struct user_reg { - - /* Input: Size of the user_reg structure being used */ - __u32 size; - - /* Input: Pointer to string with event name, description and flags */ - __u64 name_args; - - /* Output: Byte index of the event within the status page */ - __u32 status_index; - - /* Output: Index of the event to use when writing data */ - __u32 write_index; -}; - -#define DIAG_IOC_MAGIC '*' - -/* Requests to register a user_event */ -#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*) - -/* Requests to delete a user_event */ -#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) - -/* Data type that was passed to the BPF program */ -enum { - /* Data resides in kernel space */ - USER_BPF_DATA_KERNEL, - - /* Data resides in user space */ - USER_BPF_DATA_USER, - - /* Data is a pointer to a user_bpf_iter structure */ - USER_BPF_DATA_ITER, -}; - -/* - * Describes an iovec iterator that BPF programs can use to access data for - * a given user_event write() / writev() call. 
- */ -struct user_bpf_iter { - - /* Offset of the data within the first iovec */ - __u32 iov_offset; - - /* Number of iovec structures */ - __u32 nr_segs; - - /* Pointer to iovec structures */ - const struct iovec *iov; -}; - -/* Context that BPF programs receive when attached to a user_event */ -struct user_bpf_context { - - /* Data type being passed (see union below) */ - __u32 data_type; - - /* Length of the data */ - __u32 data_len; - - /* Pointer to data, varies by data type */ - union { - /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */ - void *kdata; - - /* User data (data_type == USER_BPF_DATA_USER) */ - void *udata; - - /* Direct iovec (data_type == USER_BPF_DATA_ITER) */ - struct user_bpf_iter *iter; - }; -}; - -#endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 9bb54c0b3b2d..2c43e327a619 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -767,6 +767,7 @@ config USER_EVENTS bool "User trace events" select TRACING select DYNAMIC_EVENTS + depends on BROKEN || COMPILE_TEST # API needs to be straighten out help User trace events are user-defined trace events that can be used like an existing kernel trace event. User trace diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index 8b3d241a31c2..68d62bfac12f 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -18,7 +18,12 @@ #include #include #include +/* Reminder to move to uapi when everything works */ +#ifdef CONFIG_COMPILE_TEST +#include +#else #include +#endif #include "trace.h" #include "trace_dynevent.h" -- cgit v1.2.3-71-gd317
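For reference while the interface is being reworked, registering an event against the header above went roughly as follows. The tracefs file name and the name_args format here are assumptions drawn from the user_events documentation of this period, and the API is explicitly unstable (hence the BROKEN dependency):

/* Sketch: register a user_event via DIAG_IOCSREG from the header above.
 * The device path depends on where tracefs is mounted and is assumed. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "user_events.h"	/* no longer installed as a uapi header */

int main(void)
{
	struct user_reg reg = { 0 };
	int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);

	if (fd < 0)
		return 1;
	reg.size = sizeof(reg);
	reg.name_args = (uint64_t)(uintptr_t)"test_event u32 count";
	if (ioctl(fd, DIAG_IOCSREG, &reg) < 0) {
		close(fd);
		return 1;
	}
	/* reg.write_index prefixes write() payloads for this event;
	 * reg.status_index locates its byte in the status page. */
	printf("write_index=%u status_index=%u\n",
	       reg.write_index, reg.status_index);
	close(fd);
	return 0;
}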